diff --git a/logs_oct12/eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation_20251013_213724.log b/logs_oct12/eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation_20251013_213724.log new file mode 100644 index 0000000000000000000000000000000000000000..3d289f983f1aeeaa9c49abbbbd33074c6b00c7b9 --- /dev/null +++ b/logs_oct12/eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation_20251013_213724.log @@ -0,0 +1,716 @@ +==== STARTING EXPERIMENT: eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation ==== +Log File: eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation_20251013_213724.log +Timestamp: 2025-10-13 21:37:24 +===================================== +Processing: /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 21:37:26,889] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +config_mask.torch_dtype: torch.bfloat16 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Load mask model from /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation over. 
+TinyLlavaConfig { + "architectures": [ + "TinyLlavaForConditionalGeneration" + ], + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 1.3, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 1.3, + "temperature_mlp": 1.3, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): 
Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
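The printout above shows every attention and MLP projection in the language model, plus both connector layers, replaced by `SupermaskLinearSparsity_SoftForward_Normal` modules, while the vision tower keeps plain `Linear` layers. The log does not include that class's implementation; the sketch below is a minimal, hypothetical reading of a soft-forward supermask linear layer consistent with the config above (`mask_type: soft`, temperatures 1.3). The class and attribute names are illustrative, not TinyLLaVA's actual API.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Illustrative stand-in for SupermaskLinearSparsity_SoftForward_Normal.

    Each weight entry gets a learnable score; the forward pass scales the
    weight by sigmoid(score / temperature). "Normal" backward here means
    gradients flow through the sigmoid without a straight-through trick.
    """

    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One mask logit ("score") per weight entry. Zeros give a uniform
        # 0.5 gate; a real checkpoint would load trained scores instead.
        self.scores = nn.Parameter(torch.zeros_like(self.weight))

    def soft_mask(self):
        return torch.sigmoid(self.scores / self.temperature)

    def forward(self, x):
        return F.linear(x, self.weight * self.soft_mask(), self.bias)
```

Under this reading, `mask_type: soft` means the sigmoid gate itself scales the weights at inference time rather than being thresholded to a hard 0/1 mask, which matches `sparsity_connector: null` and `threshold_connector: null` in the config.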
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 21:38:08,887] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
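The `Collect masks ... over.` and `Applying mask on ... / Applied soft mask on ...` lines above describe the merge step: masks collected from the mask-tuned checkpoint are folded into the plain-`Linear` base model one module at a time, after which the merged model is saved with a config stripped of mask parameters. A hedged sketch of that loop, assuming `masks` maps the module names printed above to weight-shaped tensors; the helper name and signature are invented for illustration and are not the repository's actual code:

```python
import torch

@torch.no_grad()
def apply_soft_masks(plain_model, masks):
    """Fold collected soft masks into a plain model's Linear weights.

    `masks` maps module names (e.g. "model.layers.0.self_attn.q_proj")
    to tensors shaped like the corresponding weight matrix.
    """
    modules = dict(plain_model.named_modules())
    for name, mask in masks.items():
        module = modules[name]
        print(f"Applying mask on {name} with dtype, "
              f"mask_dtype={mask.dtype}, module_dtype={module.weight.dtype}")
        # Multiply the soft mask into the weights in place, so the saved
        # model consists of ordinary Linear layers.
        module.weight.mul_(mask.to(module.weight.dtype))
        print(f"Applied soft mask on {name}.")
```

Because the log reports `mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16` for every module, the `.to(...)` cast is a no-op in this run; it would only matter if masks were stored in a different precision.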
+  0%|          | 0/900 [00:00<?, ?it/s]
+  "pad_token": "<|endoftext|>",
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.5,
+    "temperature_mlp": 1.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "full",
+  "tune_type_llm": "full",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": 0,
+  "use_cache": true,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2Attention(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): 
SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 21:44:57,637] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
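The repeated "Applying mask on … / Applied soft mask on …" pairs above walk every q/k/v/o and gate/up/down projection (plus the two connector Linears) and fold a soft mask into each weight matrix once the mask dtype matches the module dtype. A minimal sketch of that step, assuming a sigmoid(scores / temperature) soft mask; the function name, the `scores` parameter, and the in-place folding are illustrative assumptions, not code from the TinyLLaVA repository:

```python
import torch
import torch.nn as nn

def apply_soft_mask(module: nn.Linear, scores: torch.Tensor, temperature: float) -> None:
    """Fold a learned soft mask into a Linear layer's weight, in place.

    `scores` holds one logit per weight; sigmoid(scores / temperature)
    yields a (0, 1) scale per weight, matching the "soft" mask_type in
    the config above. Names here are illustrative, not from the repo.
    """
    assert scores.shape == module.weight.shape
    # Mirror the log's dtype check: mask_dtype must equal module_dtype.
    mask = torch.sigmoid(scores / temperature).to(module.weight.dtype)
    with torch.no_grad():
        module.weight.mul_(mask)

# Example shaped like the q_proj entries in the log (896 -> 896, bf16),
# using the temperature_attn=1.3 value from the first printed config.
q_proj = nn.Linear(896, 896, bias=True).to(torch.bfloat16)
scores = torch.randn(896, 896, dtype=torch.bfloat16)
apply_soft_mask(q_proj, scores, temperature=1.3)
```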
+ 0%| | 0/900 [00:00<?, ?it/s] + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 1.7, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 1.7, + "temperature_mlp": 1.7, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn):
SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
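For orientation, the MLPConnector printed above (connector_type "mlp2x_gelu") is a two-layer MLP that projects 1152-d SigLIP patch features into the LLM's 896-d embedding space; an unmasked PyTorch equivalent of the printed module:

```python
import torch.nn as nn

# Unmasked equivalent of the printed MLPConnector: SigLIP features
# (1152-d) -> GELU -> Qwen2.5-0.5B hidden size (896-d).
connector = nn.Sequential(
    nn.Linear(1152, 896, bias=True),
    nn.GELU(approximate='none'),
    nn.Linear(896, 896, bias=True),
)
```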
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 21:51:03,414] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
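Note on the "Applying mask ... / Applied soft mask ..." pairs above: the SupermaskLinearSparsity_SoftForward_Normal modules in the architecture dumps, together with mask_type "soft" and the temperature fields in the config, suggest a linear layer whose weights are gated elementwise by a temperature-scaled sigmoid over learned scores, with the gate folded into the weights at "apply" time. The log does not show the implementation; the following is a minimal sketch under that assumption (the class and method names are hypothetical, not the repository's actual API):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    # Hypothetical sketch: each weight w_ij is gated by sigmoid(score_ij / T).
    def __init__(self, in_features, out_features, bias=True, temperature=1.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        self.scores = nn.Parameter(torch.zeros_like(self.weight))

    def forward(self, x):
        # Soft-forward: mask is applied on the fly during training/eval.
        gate = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * gate, self.bias)

    @torch.no_grad()
    def apply_mask_(self):
        # Fold the soft mask into the weights in the module's own dtype,
        # the step each "Applied soft mask on ..." line above reports
        # (mask_dtype and module_dtype are both bfloat16 in this log).
        gate = torch.sigmoid(self.scores / self.temperature)
        self.weight.mul_(gate.to(self.weight.dtype))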
+ 0%|          | 0/900 [00:00<?, ?it/s] [... remainder of the progress bar, the evaluation output, the next run's startup messages, and the opening fields of the following TinyLlavaConfig dump were lost in extraction; the dump resumes below ...] + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 1.9, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 1.9, + "temperature_mlp": 1.9, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn):
SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
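The runs in this log repeat the same pipeline while varying only the mask temperatures (1.9 in the dump above, 2.1 in the later run); that is the quantity this ablation sweeps. Under the sigmoid gating sketched earlier, a lower temperature saturates the gate toward a hard 0/1 mask, while a higher one keeps it soft and closer to 0.5, as this small demonstration shows:

import torch

scores = torch.tensor([-2.0, -0.5, 0.5, 2.0])
for T in (0.5, 1.9, 2.1):
    # Gate values for the same scores at different temperatures.
    print(f"T={T}:", torch.sigmoid(scores / T).tolist())
# Smaller T drives gates toward 0 or 1 (a harder mask);
# larger T keeps them nearer 0.5 (a softer mask).

In the limit T -> 0 the soft mask approaches a binary supermask, so the sweep presumably probes how much that sharpness matters for the masked model's evaluation scores.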
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 21:57:44,673] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
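The line "Using cleaned config_mask (without mask parameters) for saving." above indicates the mask-specific fields are dropped from the config before the masked checkpoint is written, which is consistent with the mask-free TinyLlavaConfig dumps that appear when a saved model is reloaded. A rough sketch of such cleaning, using field names visible in this log (the helper itself is hypothetical):

# Hypothetical cleanup: drop mask-only keys before saving the config.
MASK_KEYS = {
    "mask_model", "mask_type_connector", "backward_type_connector",
    "sparsity_connector", "subnet_type_connector", "temperature_connector",
    "threshold_connector",
}

def clean_config_dict(cfg: dict) -> dict:
    cleaned = {k: v for k, v in cfg.items() if k not in MASK_KEYS}
    if "text_config" in cleaned:
        # The nested text_config carries its own mask fields in this log.
        text = dict(cleaned["text_config"])
        for k in ("mask_type", "masked_layers", "subnet_mode", "subnet_type",
                  "backward_type", "temperature_attn", "temperature_mlp"):
            text.pop(k, None)
        cleaned["text_config"] = text
    return cleaned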
+ 0%|          | 0/900 [00:00<?, ?it/s] [... remainder of the progress bar, the evaluation output, the next run's startup messages, and the opening fields of the following TinyLlavaConfig dump were lost in extraction; the dump resumes below ...] + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.1, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.1, + "temperature_mlp": 2.1, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn):
SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
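After the pretrained base is loaded (above), this run too proceeds through a config and architecture dump to "Collect masks ... over." and then one "Applying mask ... / Applied soft mask ..." pair per masked submodule, in named_modules order, as the lines below show. A plausible driver for that loop, reusing the hypothetical SoftSupermaskLinear sketch from earlier (the traversal and message format mirror the log, but the helper names are assumptions):

import torch

@torch.no_grad()
def apply_all_masks(model):
    # Walk every submodule; those carrying a soft supermask expose the
    # (hypothetical) apply_mask_ method that folds the mask into weights.
    for name, module in model.named_modules():
        if hasattr(module, "apply_mask_"):
            print(f"Applying mask on {name} with dtype, "
                  f"mask_dtype={module.scores.dtype}, "
                  f"module_dtype={module.weight.dtype}")
            module.apply_mask_()
            print(f"Applied soft mask on {name}.")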
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 22:05:29,491] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
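[Editor's note] The "Applying mask ... / Applied soft mask ..." lines above show each Linear projection (attention q/k/v/o, MLP gate/up/down, and the two connector layers) having its soft mask folded into the weights in bfloat16, after which a cleaned config without mask parameters is saved. A minimal sketch of what such a fold might look like is below; it is hypothetical (not the repo's actual apply_masks.py), and assumes the soft mask is a sigmoid over learned per-weight scores with a temperature, matching the "soft" mask_type and temperature fields in the config dump.

```python
# Hypothetical sketch of folding a "soft" supermask into a Linear layer,
# consistent with the log's "mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16".
# apply_soft_mask_ and scores are illustrative names, not the repo's API.
import torch
import torch.nn as nn

def apply_soft_mask_(module: nn.Linear, scores: torch.Tensor, temperature: float) -> None:
    """Scale weights in place by sigmoid(scores / temperature), in the module's dtype."""
    assert scores.shape == module.weight.shape
    mask = torch.sigmoid(scores / temperature).to(module.weight.dtype)  # soft mask in [0, 1]
    with torch.no_grad():
        module.weight.mul_(mask)  # fold the mask into the weights

# Example: mask one projection the way the log reports for q_proj.
layer = nn.Linear(896, 896, bias=True).to(torch.bfloat16)
scores = torch.randn(896, 896)  # per-weight mask logits (hypothetical)
apply_soft_mask_(layer, scores, temperature=1.3)
```

Once the mask is folded in, the layer is an ordinary Linear again, which is consistent with the plain Linear modules in the second model printout above.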
+ 0%| | 0/900 [00:00<?, ?it/s] +Traceback (most recent call last): + main() + File "/nfs/ywang29/TinyLLaVA/scripts/apply_masks.py", line 123, in main + config_mask = TinyLlavaConfig.from_pretrained(model_args.mask_model_name_or_path) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/configuration_utils.py", line 602, in from_pretrained + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation does not appear to have a file named config.json. Checkout 'https://huggingface.co//nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation/tree/main' for available files. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 22:12:24,641] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation/mask_applied'. Use `repo_type` argument if needed. + +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/model/load_model.py", line 38, in load_pretrained_model + model = TinyLlavaForConditionalGeneration.from_pretrained(model_name_or_path,low_cpu_mem_usage=True) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained + resolved_config_file = cached_file( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation/mask_applied'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
+ +During handling of the above exception, another exception occurred: + +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation/mask_applied'. Use `repo_type` argument if needed. + +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/opt/conda/envs/tinyllava/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/opt/conda/envs/tinyllava/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/nfs/ywang29/TinyLLaVA/tinyllava/eval/model_vqa_mmmu.py", line 180, in <module> + eval_model(args) + File "/nfs/ywang29/TinyLLaVA/tinyllava/eval/model_vqa_mmmu.py", line 88, in eval_model + model, tokenizer, image_processor, context_len = load_pretrained_model(model_path) + File "/nfs/ywang29/TinyLLaVA/tinyllava/model/load_model.py", line 40, in load_pretrained_model + model_config = TinyLlavaConfig.from_pretrained(model_name_or_path) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/configuration_utils.py", line 602, in from_pretrained + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation/mask_applied'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
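[Editor's note] The root cause of this whole cascade is in the first traceback above: the 2.3-temperature checkpoint directory has no config.json, so apply_masks.py never produced a `mask_applied` directory. When the eval then calls from_pretrained() on `.../mask_applied`, transformers finds no local folder and falls back to treating the string as a Hub repo id, which an absolute filesystem path can never satisfy, hence the HFValidationError followed by the final OSError. The FileNotFoundError tracebacks that follow are downstream of the same failure, since no answers were ever generated. A hypothetical pre-flight check (not part of the repo) that would surface this earlier:

```python
# Hypothetical helper: fail early with a clear message before from_pretrained()
# retries a non-existent local path as a Hub repo id.
import os

def validate_local_checkpoint(path: str) -> str:
    """Ensure `path` is an existing checkpoint directory containing config.json."""
    if not os.path.isdir(path):
        raise FileNotFoundError(f"checkpoint directory does not exist: {path!r}")
    config_json = os.path.join(path, "config.json")
    if not os.path.isfile(config_json):
        raise FileNotFoundError(f"no config.json in checkpoint directory: {path!r}")
    return path
```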
+Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/scripts/convert_answer_to_mmmu.py", line 31, in <module> + eval_model(args) + File "/nfs/ywang29/TinyLLaVA/scripts/convert_answer_to_mmmu.py", line 7, in eval_model + answers = [json.loads(q) for q in open(os.path.expanduser(args.answers_file), "r")] +FileNotFoundError: [Errno 2] No such file or directory: '/s3-code/ywang29/datasets/tinyllava/eval/MMMU/answers/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation-mask_applied.jsonl' +Traceback (most recent call last): + File "/s3-code/ywang29/datasets/tinyllava/eval/MMMU/eval/main_eval_only.py", line 19, in <module> + output_dict = json.load(open(args.output_path)) +FileNotFoundError: [Errno 2] No such file or directory: '/s3-code/ywang29/datasets/tinyllava/eval/MMMU/answers/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation-mask_applied_output.json' +==== EXPERIMENT COMPLETED: eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation ==== +Log File: eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation_20251013_221214.log +Timestamp: 2025-10-13 22:12:28 +===================================== diff --git a/logs_oct12/eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_221228.log b/logs_oct12/eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_221228.log new file mode 100644 index 0000000000000000000000000000000000000000..9c8905688e34e0524a15c34b8d0fda2667eb1611 --- /dev/null +++ b/logs_oct12/eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_221228.log @@ -0,0 +1,716 @@ +==== STARTING EXPERIMENT: eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation ==== +Log File: eval_qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_221228.log +Timestamp: 2025-10-13 22:12:28 +===================================== +Processing: /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 22:12:31,004] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +config_mask.torch_dtype: torch.bfloat16 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Load mask model from /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation over.
+TinyLlavaConfig { + "architectures": [ + "TinyLlavaForConditionalGeneration" + ], + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.5, + "temperature_mlp": 2.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): 
Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
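[Editor's note] Relative to the 1.3-temperature run earlier in these logs, the only substantive config change in this experiment is temperature_attn, temperature_mlp, and temperature_connector moving to 2.5. Assuming the sigmoid-style soft mask sketched earlier, the temperature controls how close the mask sits to a hard 0/1 gate; the snippet below is just an illustration of that effect under that assumption, not the repo's code.

```python
# Illustration: with a soft mask m = sigmoid(s / T), a higher temperature T
# flattens the mask toward 0.5, while a lower T pushes it toward a binary gate.
import torch

s = torch.tensor([-3.0, -1.0, 0.0, 1.0, 3.0])  # example mask logits
for T in (1.3, 2.5):                            # the two temperatures in these logs
    print(T, torch.sigmoid(s / T).tolist())
```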
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying soft masks (mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16) to model.layers.2 through model.layers.23; in each decoder layer the masked modules are self_attn.{q,k,v,o}_proj and mlp.{gate,up,down}_proj.
+Applied soft mask on every masked module of model.layers.2 ... model.layers.23.
+Applying soft mask (mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16) on _connector.0 and _connector.2.
+Applied soft mask on _connector.0 and _connector.2.
+Using cleaned config_mask (without mask parameters) for saving.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-13 22:13:12,187] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+  warnings.warn(
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
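The "Applying mask ... Applied soft mask" lines above fold a learned soft supermask into each Linear projection before the masked checkpoint is saved. A minimal sketch of what one such step could look like, assuming the soft mask is a per-weight sigmoid of learned scores scaled by the configured temperature; the internals of SupermaskLinearSparsity_SoftForward_Normal are not shown in this log, so the names `scores` and `apply_soft_mask` are hypothetical:

    import torch
    import torch.nn as nn

    def apply_soft_mask(module: nn.Linear, scores: torch.Tensor, temperature: float) -> None:
        # Hypothetical reconstruction: `scores` has the same shape as module.weight
        # and holds the learned mask logits; the soft mask is sigmoid(scores / T).
        mask = torch.sigmoid(scores / temperature).to(module.weight.dtype)
        assert mask.dtype == module.weight.dtype  # mirrors the mask_dtype/module_dtype pair in the log
        with torch.no_grad():
            module.weight.mul_(mask)  # bake the mask into the weights in place

Baking the mask into the weights would explain the "cleaned config_mask (without mask parameters)" line that follows: once the mask is folded in, the saved checkpoint is an ordinary dense model again and the mask hyperparameters can be dropped from its config.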
+  0%|          | 0/900 [00:00<?, ?it/s]
+[... evaluation progress and results truncated in the source log ...]
+TinyLlavaConfig: identical to the masked-model dump above, except "temperature_connector", "temperature_attn", and "temperature_mlp", which are 2.7 for this run.
+
+TinyLlavaForConditionalGeneration: same masked architecture as printed above (SupermaskLinearSparsity_SoftForward_Normal projections in all 24 Qwen2 decoder layers and in both connector Linear layers; the SigLIP vision tower is unmasked).
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over.
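The three loading messages above pull each component from its own subfolder of the pretrain checkpoint. Roughly equivalent standalone code, assuming stock transformers loaders; TinyLLaVA's actual loader wires these into TinyLlavaForConditionalGeneration and is not reproduced here:

    import torch
    from transformers import AutoModelForCausalLM, SiglipVisionModel

    base = "/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain"

    # Language model and vision tower are standard HF checkpoints in subfolders.
    language_model = AutoModelForCausalLM.from_pretrained(f"{base}/language_model", torch_dtype=torch.bfloat16)
    vision_tower = SiglipVisionModel.from_pretrained(f"{base}/vision_tower", torch_dtype=torch.bfloat16)

    # The connector is stored as a bare state dict and loaded into the MLP connector.
    connector_state = torch.load(f"{base}/connector/pytorch_model.bin", map_location="cpu")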
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 22:19:19,384] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
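The per-module messages above come from folding a trained soft supermask into each Linear projection before evaluation. Below is a minimal sketch of what one such step might look like, assuming the mask is computed as sigmoid(score / temperature); the helper and its details are illustrative assumptions, not the SupermaskLinearSparsity_SoftForward_Normal implementation used by this codebase.

import torch
import torch.nn as nn

def apply_soft_mask(module: nn.Linear, scores: torch.Tensor, temperature: float) -> None:
    # Cast the mask to the module dtype first, matching the
    # "mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16" checks logged above.
    mask = torch.sigmoid(scores / temperature).to(module.weight.dtype)
    with torch.no_grad():
        module.weight.mul_(mask)  # soft mask: elementwise rescaling, no hard zeroing

# Shapes chosen to match a q_proj of this model (896 -> 896); temperature 2.9 is
# the value dumped in the following run's config.
layer = nn.Linear(896, 896, bias=True, dtype=torch.bfloat16)
scores = torch.randn(896, 896, dtype=torch.bfloat16)
apply_soft_mask(layer, scores, temperature=2.9)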
+ 0%| | 0/900 [00:00<?, ?it/s] +TinyLlavaConfig { + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.9, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.9, + "temperature_mlp": 2.9, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn):
SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
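The run above sets temperature_connector, temperature_attn and temperature_mlp to 2.9, while a later run in this log uses 0.7. Assuming the soft mask is sigmoid(score / temperature) (an assumption; the log does not show the formula), a small illustration of what that knob changes:

import torch

scores = torch.tensor([-2.0, -0.5, 0.5, 2.0])
for t in (0.7, 2.9):  # the two temperatures whose configs are dumped in this log
    print(t, torch.sigmoid(scores / t))
# Lower temperature drives mask entries toward 0 or 1 (a harder, more binary mask);
# higher temperature keeps them near 0.5, i.e. a softer, more uniform rescaling.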
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 22:26:17,847] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
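Each block of "Applying mask on <name> ... Applied soft mask on <name>." lines follows a "Collect masks ... over." message, which suggests a collect-then-apply loop over the model's named Linear modules. A rough sketch under that assumption; the mask dict and the helper are hypothetical, and only the printed messages are taken from the log:

import torch
import torch.nn as nn

def apply_collected_masks(model: nn.Module, masks: dict[str, torch.Tensor]) -> None:
    for name, module in model.named_modules():
        if isinstance(module, nn.Linear) and name in masks:
            mask = masks[name].to(module.weight.dtype)
            print(f"Applying mask on {name} with dtype, "
                  f"mask_dtype={mask.dtype}, module_dtype={module.weight.dtype}")
            with torch.no_grad():
                module.weight.mul_(mask)  # fold the soft mask into the weights
            print(f"Applied soft mask on {name}.")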
+ 0%| | 0/900 [00:00<?, ?it/s] +TinyLlavaConfig { + "pad_token": "<|endoftext|>", + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.7, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.7, + "temperature_mlp": 0.7, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "full", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn):
SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Load base model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain over. 
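The loader messages above point at a split pretrain checkpoint with language_model/, vision_tower/ and connector/pytorch_model.bin subdirectories. A hedged sketch of reading that layout; the directory structure is inferred from the messages, and the actual TinyLLaVA loading code may differ:

import os
import torch

ckpt = ("/nfs/ywang29/TinyLLaVA/checkpoints/"
        "tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain")
# The connector is a raw state dict saved as pytorch_model.bin.
connector_state = torch.load(
    os.path.join(ckpt, "connector", "pytorch_model.bin"), map_location="cpu"
)
# language_model/ and vision_tower/ appear to be HF-style model folders, so something
# like AutoModelForCausalLM.from_pretrained(os.path.join(ckpt, "language_model"))
# would presumably load the LLM half.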
+TinyLlavaConfig { + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": "<|endoftext|>", + "pad_token_id": 151643, + "resampler_hidden_size": 768, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "tie_word_embeddings": true, + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "full", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": 0, + "use_cache": true, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2Attention( + (q_proj): Linear(in_features=896, out_features=896, bias=True) + (k_proj): Linear(in_features=896, out_features=128, bias=True) + (v_proj): Linear(in_features=896, out_features=128, bias=True) + (o_proj): Linear(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=896, out_features=4864, bias=False) + (up_proj): Linear(in_features=896, out_features=4864, bias=False) + (down_proj): Linear(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, 
bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): Linear(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): Linear(in_features=896, out_features=896, bias=True) + ) + ) +) +Collect masks for language model over. +Collect masks for connector over. +Applying mask on model.layers.0.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.q_proj. +Applying mask on model.layers.0.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.k_proj. +Applying mask on model.layers.0.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.v_proj. +Applying mask on model.layers.0.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.self_attn.o_proj. +Applying mask on model.layers.0.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.gate_proj. +Applying mask on model.layers.0.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.up_proj. +Applying mask on model.layers.0.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.0.mlp.down_proj. +Applying mask on model.layers.1.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.q_proj. +Applying mask on model.layers.1.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.k_proj. +Applying mask on model.layers.1.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.v_proj. +Applying mask on model.layers.1.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.self_attn.o_proj. +Applying mask on model.layers.1.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.gate_proj. +Applying mask on model.layers.1.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.up_proj. +Applying mask on model.layers.1.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.1.mlp.down_proj. 
+Applying mask on model.layers.2.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.q_proj. +Applying mask on model.layers.2.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.k_proj. +Applying mask on model.layers.2.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.v_proj. +Applying mask on model.layers.2.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.self_attn.o_proj. +Applying mask on model.layers.2.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.gate_proj. +Applying mask on model.layers.2.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.up_proj. +Applying mask on model.layers.2.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.2.mlp.down_proj. +Applying mask on model.layers.3.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.q_proj. +Applying mask on model.layers.3.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.k_proj. +Applying mask on model.layers.3.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.v_proj. +Applying mask on model.layers.3.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.self_attn.o_proj. +Applying mask on model.layers.3.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.gate_proj. +Applying mask on model.layers.3.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.up_proj. +Applying mask on model.layers.3.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.3.mlp.down_proj. +Applying mask on model.layers.4.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.q_proj. +Applying mask on model.layers.4.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.k_proj. +Applying mask on model.layers.4.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.v_proj. +Applying mask on model.layers.4.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.self_attn.o_proj. +Applying mask on model.layers.4.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.gate_proj. +Applying mask on model.layers.4.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.up_proj. +Applying mask on model.layers.4.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.4.mlp.down_proj. 
+Applying mask on model.layers.5.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.q_proj. +Applying mask on model.layers.5.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.k_proj. +Applying mask on model.layers.5.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.v_proj. +Applying mask on model.layers.5.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.self_attn.o_proj. +Applying mask on model.layers.5.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.gate_proj. +Applying mask on model.layers.5.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.up_proj. +Applying mask on model.layers.5.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.5.mlp.down_proj. +Applying mask on model.layers.6.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.q_proj. +Applying mask on model.layers.6.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.k_proj. +Applying mask on model.layers.6.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.v_proj. +Applying mask on model.layers.6.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.self_attn.o_proj. +Applying mask on model.layers.6.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.gate_proj. +Applying mask on model.layers.6.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.up_proj. +Applying mask on model.layers.6.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.6.mlp.down_proj. +Applying mask on model.layers.7.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.q_proj. +Applying mask on model.layers.7.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.k_proj. +Applying mask on model.layers.7.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.v_proj. +Applying mask on model.layers.7.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.self_attn.o_proj. +Applying mask on model.layers.7.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.gate_proj. +Applying mask on model.layers.7.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.up_proj. +Applying mask on model.layers.7.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.7.mlp.down_proj. 
+Applying mask on model.layers.8.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.q_proj. +Applying mask on model.layers.8.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.k_proj. +Applying mask on model.layers.8.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.v_proj. +Applying mask on model.layers.8.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.self_attn.o_proj. +Applying mask on model.layers.8.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.gate_proj. +Applying mask on model.layers.8.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.up_proj. +Applying mask on model.layers.8.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.8.mlp.down_proj. +Applying mask on model.layers.9.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.q_proj. +Applying mask on model.layers.9.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.k_proj. +Applying mask on model.layers.9.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.v_proj. +Applying mask on model.layers.9.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.self_attn.o_proj. +Applying mask on model.layers.9.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.gate_proj. +Applying mask on model.layers.9.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.up_proj. +Applying mask on model.layers.9.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.9.mlp.down_proj. +Applying mask on model.layers.10.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.q_proj. +Applying mask on model.layers.10.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.k_proj. +Applying mask on model.layers.10.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.v_proj. +Applying mask on model.layers.10.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.self_attn.o_proj. +Applying mask on model.layers.10.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.gate_proj. +Applying mask on model.layers.10.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.up_proj. 
+Applying mask on model.layers.10.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.10.mlp.down_proj. +Applying mask on model.layers.11.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.q_proj. +Applying mask on model.layers.11.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.k_proj. +Applying mask on model.layers.11.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.v_proj. +Applying mask on model.layers.11.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.self_attn.o_proj. +Applying mask on model.layers.11.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.gate_proj. +Applying mask on model.layers.11.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.up_proj. +Applying mask on model.layers.11.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.11.mlp.down_proj. +Applying mask on model.layers.12.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.q_proj. +Applying mask on model.layers.12.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.k_proj. +Applying mask on model.layers.12.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.v_proj. +Applying mask on model.layers.12.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.self_attn.o_proj. +Applying mask on model.layers.12.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.gate_proj. +Applying mask on model.layers.12.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.up_proj. +Applying mask on model.layers.12.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.12.mlp.down_proj. +Applying mask on model.layers.13.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.q_proj. +Applying mask on model.layers.13.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.k_proj. +Applying mask on model.layers.13.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.v_proj. +Applying mask on model.layers.13.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.self_attn.o_proj. +Applying mask on model.layers.13.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.gate_proj. 
+Applying mask on model.layers.13.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.up_proj. +Applying mask on model.layers.13.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.13.mlp.down_proj. +Applying mask on model.layers.14.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.q_proj. +Applying mask on model.layers.14.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.k_proj. +Applying mask on model.layers.14.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.v_proj. +Applying mask on model.layers.14.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.self_attn.o_proj. +Applying mask on model.layers.14.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.gate_proj. +Applying mask on model.layers.14.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.up_proj. +Applying mask on model.layers.14.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.14.mlp.down_proj. +Applying mask on model.layers.15.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.q_proj. +Applying mask on model.layers.15.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.k_proj. +Applying mask on model.layers.15.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.v_proj. +Applying mask on model.layers.15.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.self_attn.o_proj. +Applying mask on model.layers.15.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.gate_proj. +Applying mask on model.layers.15.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.up_proj. +Applying mask on model.layers.15.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.15.mlp.down_proj. +Applying mask on model.layers.16.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.q_proj. +Applying mask on model.layers.16.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.k_proj. +Applying mask on model.layers.16.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.v_proj. +Applying mask on model.layers.16.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.self_attn.o_proj. 
+Applying mask on model.layers.16.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.gate_proj. +Applying mask on model.layers.16.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.up_proj. +Applying mask on model.layers.16.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.16.mlp.down_proj. +Applying mask on model.layers.17.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.q_proj. +Applying mask on model.layers.17.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.k_proj. +Applying mask on model.layers.17.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.v_proj. +Applying mask on model.layers.17.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.self_attn.o_proj. +Applying mask on model.layers.17.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.gate_proj. +Applying mask on model.layers.17.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.up_proj. +Applying mask on model.layers.17.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.17.mlp.down_proj. +Applying mask on model.layers.18.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.q_proj. +Applying mask on model.layers.18.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.k_proj. +Applying mask on model.layers.18.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.v_proj. +Applying mask on model.layers.18.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.self_attn.o_proj. +Applying mask on model.layers.18.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.gate_proj. +Applying mask on model.layers.18.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.up_proj. +Applying mask on model.layers.18.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.18.mlp.down_proj. +Applying mask on model.layers.19.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.q_proj. +Applying mask on model.layers.19.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.k_proj. +Applying mask on model.layers.19.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.v_proj. 
+Applying mask on model.layers.19.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.self_attn.o_proj. +Applying mask on model.layers.19.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.gate_proj. +Applying mask on model.layers.19.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.up_proj. +Applying mask on model.layers.19.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.19.mlp.down_proj. +Applying mask on model.layers.20.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.q_proj. +Applying mask on model.layers.20.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.k_proj. +Applying mask on model.layers.20.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.v_proj. +Applying mask on model.layers.20.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.self_attn.o_proj. +Applying mask on model.layers.20.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.gate_proj. +Applying mask on model.layers.20.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.up_proj. +Applying mask on model.layers.20.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.20.mlp.down_proj. +Applying mask on model.layers.21.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.q_proj. +Applying mask on model.layers.21.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.k_proj. +Applying mask on model.layers.21.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.v_proj. +Applying mask on model.layers.21.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.self_attn.o_proj. +Applying mask on model.layers.21.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.gate_proj. +Applying mask on model.layers.21.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.up_proj. +Applying mask on model.layers.21.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.21.mlp.down_proj. +Applying mask on model.layers.22.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.q_proj. +Applying mask on model.layers.22.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.k_proj. 
+Applying mask on model.layers.22.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.v_proj. +Applying mask on model.layers.22.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.self_attn.o_proj. +Applying mask on model.layers.22.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.gate_proj. +Applying mask on model.layers.22.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.up_proj. +Applying mask on model.layers.22.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.22.mlp.down_proj. +Applying mask on model.layers.23.self_attn.q_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.q_proj. +Applying mask on model.layers.23.self_attn.k_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.k_proj. +Applying mask on model.layers.23.self_attn.v_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.v_proj. +Applying mask on model.layers.23.self_attn.o_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.self_attn.o_proj. +Applying mask on model.layers.23.mlp.gate_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.gate_proj. +Applying mask on model.layers.23.mlp.up_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.up_proj. +Applying mask on model.layers.23.mlp.down_proj with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on model.layers.23.mlp.down_proj. +Applying mask on _connector.0 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.0. +Applying mask on _connector.2 with dtype, mask_dtype=torch.bfloat16, module_dtype=torch.bfloat16 +Applied soft mask on _connector.2. +Using cleaned config_mask (without mask parameters) for saving. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 22:32:38,278] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
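The per-module pass above multiplies each Linear weight by a learned soft mask. Below is a minimal sketch of what the "Applied soft mask" step plausibly does, assuming a per-weight logit squashed by sigmoid(score / temperature) with the logged temperature 1.3 and the --init_mean_* 3.0 from the launch command; the class is illustrative, not the repo's actual SupermaskLinearSparsity_SoftForward_Normal implementation.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Module):
        """Linear layer whose weights are gated by sigmoid(scores / T)."""
        def __init__(self, in_features, out_features, bias=True,
                     temperature=1.3, init_mean=3.0):
            super().__init__()
            self.linear = nn.Linear(in_features, out_features, bias=bias)
            # One learnable logit per weight; init_mean=3.0 starts the mask
            # near sigmoid(3.0 / 1.3) ~= 0.91, i.e. almost fully "on".
            self.scores = nn.Parameter(torch.full_like(self.linear.weight, init_mean))
            self.temperature = temperature

        def forward(self, x):
            mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
            return F.linear(x, self.linear.weight * mask, self.linear.bias)

Under that reading, applying the mask at load time with mask_dtype=torch.bfloat16 equal to module_dtype amounts to baking sigmoid(scores / 1.3) into the stored weights once.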
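The model printout earlier also fixes the token geometry: with 14x14 patches on a 384-pixel image, the SigLIP tower emits a 27x27 grid, which is why its position embedding is Embedding(729, 1152), and the mlp2x_gelu connector maps each of those 729 patch tokens into the LLM's 896-dim space. A quick check of that arithmetic:

    # Vision-token arithmetic implied by the printed modules.
    image_size, patch_size = 384, 14
    grid = (image_size - patch_size) // patch_size + 1  # Conv2d, stride 14, 'valid' padding
    print(grid, grid * grid)  # 27 729 -> matches Embedding(729, 1152)

    vision_hidden, llm_hidden = 1152, 896
    # connector: Linear(1152 -> 896) -> GELU -> Linear(896 -> 896)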
+  0%|          | 0/900 [00:00<?, ?it/s]
+[2025-10-13 06:57:46,506] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 06:57:46,506] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 06:57:46,506] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 06:57:46,508] [INFO] [launch.py:253:main] process 574575 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 06:57:46,511] [INFO] [launch.py:253:main] process 574576 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', ...]
+[2025-10-13 06:57:46,513] [INFO] [launch.py:253:main] process 574577 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', ...]
+[2025-10-13 06:57:46,515] [INFO] [launch.py:253:main] process 574578 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', ...]
+[2025-10-13 06:57:46,517] [INFO] [launch.py:253:main] process 574579 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', ...]
+[2025-10-13 06:57:46,520] [INFO] [launch.py:253:main] process 574580 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', ...]
+[2025-10-13 06:57:46,522] [INFO] [launch.py:253:main] process 574581 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', ...]
+[2025-10-13 06:57:46,524] [INFO] [launch.py:253:main] process 574582 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
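The spawned command fixes the optimization scale; combining its batch flags over the 8 ranks gives the effective global batch, and --train_data_ratio 0.1 subsamples the mix. The ~665K figure below is read off the json filename and is approximate:

    per_device_train_batch_size = 4
    gradient_accumulation_steps = 4
    dist_world_size = 8
    global_batch = per_device_train_batch_size * gradient_accumulation_steps * dist_world_size
    print(global_batch)        # 128 samples per optimizer step

    print(int(665_000 * 0.1))  # ~66,500 training samples after subsampling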
+[2025-10-13 06:57:53,157] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,223] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,396] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,464] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,499] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,499] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,502] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,522] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 06:57:53,662] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,662] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,801] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,876] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,909] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,910] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,910] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 06:57:53,920] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 06:57:53,928] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.3, 'temperature_mlp': 1.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+  warnings.warn(
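With mask_model ['llm', 'connector'] and masked_layers 'all', the set of masked modules is exactly what the per-module "Applied soft mask" lines in the eval section enumerate; counting them:

    num_layers = 24         # Qwen2.5-0.5B decoder layers
    attn_projs = 4          # q_proj, k_proj, v_proj, o_proj
    mlp_projs = 3           # gate_proj, up_proj, down_proj
    connector_linears = 2   # _connector.0 and _connector.2
    total_masked = num_layers * (attn_projs + mlp_projs) + connector_linears
    print(total_masked)     # 170 masked Linear modules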
+ warnings.warn(
+TinyLlavaConfig {
+ "backward_type_connector": "normal",
+ "cache_dir": null,
+ "connector_type": "mlp2x_gelu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "square",
+ "image_token_index": -200,
+ "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "mask_model": [
+ "llm",
+ "connector"
+ ],
+ "mask_type_connector": "soft",
+ "model_type": "tinyllava",
+ "num_queries": 128,
+ "num_resampler_layers": 3,
+ "pad_token": null,
+ "resampler_hidden_size": 768,
+ "sparsity_connector": null,
+ "subnet_type_connector": "global",
+ "temperature_connector": 1.3,
+ "text_config": {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "backward_type": "normal",
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_size": 896,
+ "intermediate_size": 4864,
+ "mask_type": "soft",
+ "masked_layers": "all",
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "subnet_mode": "both",
+ "subnet_type": "None",
+ "temperature_attn": 1.3,
+ "temperature_mlp": 1.3,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "threshold_connector": null,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "tokenizer_padding_side": "right",
+ "tokenizer_use_fast": false,
+ "transformers_version": "4.40.1",
+ "tune_type_connector": "frozen",
+ "tune_type_llm": "frozen",
+ "tune_type_vision_tower": "frozen",
+ "tune_vision_tower_from_layer": -1,
+ "use_cache": false,
+ "vision_config": {
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 384,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_name_or_path": "google/siglip-so400m-patch14-384",
+ "model_name_or_path2": "",
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "patch",
+ "vision_hidden_size": 1152,
+ "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+ "vision_model_name_or_path2": "",
+ "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:574575:574575 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574575:574575 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574575:574575 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574575:574575 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574575:574575 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574575:574575 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:574577:574577 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574577:574577 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574577:574577 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574578:574578 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574578:574578 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574578:574578 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574577:574577 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574577:574577 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574577:574577 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574578:574578 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574578:574578 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574578:574578 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Using network Socket
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:574581:574581 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574581:574581 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574581:574581 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574581:574581 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574581:574581 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574581:574581 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:574579:574579 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574579:574579 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574579:574579 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574579:574579 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574579:574579 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574579:574579 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Using network Socket
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:574580:574580 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574580:574580 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574582:574582 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574580:574580 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574582:574582 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574582:574582 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574580:574580 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574580:574580 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574580:574580 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574582:574582 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574582:574582 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574582:574582 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574576:574576 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:574576:574576 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574576:574576 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574576:574576 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:574576:574576 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:574576:574576 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO ncclCommInitRank comm 0x561ee00aa380 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO ncclCommInitRank comm 0x559a43be2b00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO ncclCommInitRank comm 0x5598ed559770 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO ncclCommInitRank comm 0x55e9c9faf250 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO ncclCommInitRank comm 0x55c4f7559e70 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO ncclCommInitRank comm 0x55b06a2a6e60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO ncclCommInitRank comm 0x55f00acb7000 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO ncclCommInitRank comm 0x55de541af0c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5af29e0d9ea69a14 - Init START
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO NVLS multicast support is not available on dev 2
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO NVLS multicast support is not available on dev 4
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO NVLS multicast support is not available on dev 5
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO comm 0x55c4f7559e70 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO comm 0x55b06a2a6e60 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO comm 0x55e9c9faf250 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO comm 0x55f00acb7000 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO comm 0x55de541af0c0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO comm 0x561ee00aa380 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO comm 0x559a43be2b00 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO comm 0x5598ed559770 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574582:576169 [7] 
NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574580:576170 [5] NCCL INFO ncclCommInitRank comm 0x561ee00aa380 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574582:576169 [7] NCCL INFO ncclCommInitRank comm 0x55f00acb7000 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574581:576167 [6] NCCL INFO ncclCommInitRank comm 0x55de541af0c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574579:576168 [4] NCCL INFO ncclCommInitRank comm 0x559a43be2b00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574578:576166 [3] NCCL INFO ncclCommInitRank comm 0x5598ed559770 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574577:576165 [2] NCCL INFO ncclCommInitRank comm 0x55b06a2a6e60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:574575:576164 [0] NCCL INFO ncclCommInitRank comm 0x55e9c9faf250 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +ywang29-vrdb-test1-worker-0:574576:576171 [1] NCCL INFO ncclCommInitRank comm 0x55c4f7559e70 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5af29e0d9ea69a14 - Init COMPLETE +[2025-10-13 06:58:37,363] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 
'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 
'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 
'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 
'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 
'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 
'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 
'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 
'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 
'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 
'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 
'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 
'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 06:58:40,836] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-13 06:58:59,229 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-13 06:58:59,239 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574576:581182 
[1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p 
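For context, a minimal `torch.distributed` sketch of the kind of multi-GPU job that produces the NCCL initialization trace above. This is illustrative only, not the TinyLLaVA entry point: it assumes a single-node `torchrun --nproc_per_node=8` launch with `NCCL_DEBUG=INFO` set, and the script name is hypothetical.

```python
# Illustrative sketch only -- not the script that produced this log.
# Run: NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_init_demo.py
import os

import torch
import torch.distributed as dist


def main() -> None:
    # torchrun exports RANK / WORLD_SIZE / LOCAL_RANK into the environment.
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")

    # The first collective builds the NCCL communicator lazily: the ring/tree
    # channel connections and the "ncclCommInitRank ... Init COMPLETE" lines
    # above are emitted at this point when NCCL_DEBUG=INFO is set.
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)
    if dist.get_rank() == 0:
        print(f"all_reduce ok: {int(x.item())} == world_size {dist.get_world_size()}")

    dist.destroy_process_group()


if __name__ == "__main__":
    main()
```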
+[... per-rank completion lines elided: every rank logs "Connected all trees", "threadThresholds 8/8/64 | 64/8/64 | 512 | 512", and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer" ...]
+ywang29-vrdb-test1-worker-0:574578:581178 [3] NCCL INFO ncclCommInitRank comm 0x7f978c06b270 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574582:581179 [7] NCCL INFO ncclCommInitRank comm 0x7f680006b490 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574576:581182 [1] NCCL INFO ncclCommInitRank comm 0x7f08a0069ba0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574580:581183 [5] NCCL INFO ncclCommInitRank comm 0x7f0b0006b340 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574579:581184 [4] NCCL INFO ncclCommInitRank comm 0x7f6f3806ac10 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574575:581177 [0] NCCL INFO ncclCommInitRank comm 0x7efd6c06b4b0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574577:581180 [2] NCCL INFO ncclCommInitRank comm 0x7fa2dc06b1b0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x42200b50c1ac339 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:574581:581181 [6] NCCL INFO ncclCommInitRank comm 0x7f4c3006bb00 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x42200b50c1ac339 - Init COMPLETE
+ 0%| | 1/520 [00:14<2:03:00, 14.22s/it]
{'loss': 2.1714, 'grad_norm': 0.03814839847206242, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520 [00:14<2:03:00, 14.22s/it] 0%| | 2/520 [00:17<1:08:56, 7.99s/it] {'loss': 2.1502, 'grad_norm': 0.04067677085973634, 'learning_rate': 0.025, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:08:56, 7.99s/it] 1%| | 3/520 [00:21<51:42, 6.00s/it] {'loss': 1.7078, 'grad_norm': 0.015939706370972136, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 3/520 [00:21<51:42, 6.00s/it] 1%| | 4/520 [00:25<43:34, 5.07s/it] {'loss': 1.5946, 'grad_norm': 0.0068932964270646754, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<43:34, 5.07s/it] 1%| | 5/520 [00:28<39:03, 4.55s/it] {'loss': 1.6227, 'grad_norm': 0.01014886973296801, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:03, 4.55s/it] 1%| | 6/520 [00:32<36:20, 4.24s/it] {'loss': 1.4231, 'grad_norm': 0.005890079859369931, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:20, 4.24s/it] 1%|▏ | 7/520 [00:36<34:35, 4.05s/it] {'loss': 1.4266, 'grad_norm': 0.005896559349666777, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:36<34:35, 4.05s/it] 2%|▏ | 8/520 [00:40<35:06, 4.11s/it] {'loss': 1.4832, 'grad_norm': 0.005227924105678162, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 8/520 [00:40<35:06, 4.11s/it] 2%|▏ | 9/520 [00:44<35:14, 4.14s/it] {'loss': 1.5577, 'grad_norm': 0.006563212671957713, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<35:14, 4.14s/it] 2%|▏ | 10/520 [00:48<33:46, 3.97s/it] {'loss': 1.3749, 'grad_norm': 0.004731250654941844, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<33:46, 3.97s/it] 2%|▏ | 11/520 [00:51<33:16, 3.92s/it] {'loss': 1.4475, 'grad_norm': 0.0039738850997779445, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:51<33:16, 3.92s/it] 2%|▏ | 12/520 [00:55<32:29, 3.84s/it] {'loss': 1.3878, 'grad_norm': 0.00397221372500587, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:55<32:29, 3.84s/it][2025-10-13 07:00:04,085] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
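The DeepSpeed warning above names its own remedy; the following is a minimal sketch of it, assuming a conventional DeepSpeed engine loop. `model_engine`, `train_loader`, and `FLUSH_EVERY` are hypothetical names, not from this run.

```python
# Illustrative sketch of the fix the DeepSpeed warning suggests.
# `model_engine` (a deepspeed.initialize() engine) and `train_loader` are
# assumed to exist; FLUSH_EVERY is a hypothetical interval -- tune it to how
# often the warning actually fires.
from deepspeed.accelerator import get_accelerator

FLUSH_EVERY = 50

for step, batch in enumerate(train_loader):
    loss = model_engine(batch)          # forward pass returns the loss here
    model_engine.backward(loss)
    model_engine.step()
    if step % FLUSH_EVERY == 0:
        # Every rank flushes at the same step, so no rank stalls inside an
        # allocator cache flush while its peers are already in the next
        # collective.
        get_accelerator().empty_cache()
```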
+ 2%|▎ | 13/520 [00:59<33:39, 3.98s/it] {'loss': 1.4063, 'grad_norm': 0.003365932530092414, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [00:59<33:39, 3.98s/it] 3%|▎ | 14/520 [01:03<32:43, 3.88s/it] {'loss': 1.4577, 'grad_norm': 0.0037988365088866454, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:03<32:43, 3.88s/it] 3%|▎ | 15/520 [01:07<32:01, 3.80s/it] {'loss': 1.4456, 'grad_norm': 0.0032462572180738344, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:01, 3.80s/it] 3%|▎ | 16/520 [01:10<31:28, 3.75s/it] {'loss': 1.414, 'grad_norm': 0.0032287861085325514, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:10<31:28, 3.75s/it] 3%|▎ | 17/520 [01:14<31:07, 3.71s/it] {'loss': 1.5109, 'grad_norm': 0.0030201015118433458, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:14<31:07, 3.71s/it] 3%|▎ | 18/520 [01:17<30:48, 3.68s/it] {'loss': 1.385, 'grad_norm': 0.0032510411334095665, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:17<30:48, 3.68s/it] 4%|▎ | 19/520 [01:21<30:40, 3.67s/it] {'loss': 1.4241, 'grad_norm': 0.0031235910962774493, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:21<30:40, 3.67s/it] 4%|▍ | 20/520 [01:25<31:05, 3.73s/it] {'loss': 1.3793, 'grad_norm': 0.0033510654340645897, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:25<31:05, 3.73s/it] 4%|▍ | 21/520 [01:29<31:30, 3.79s/it] {'loss': 1.4093, 'grad_norm': 0.0031372161547092298, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:29<31:30, 3.79s/it] 4%|▍ | 22/520 [01:33<31:35, 3.81s/it] {'loss': 1.5217, 'grad_norm': 0.003248863936960784, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:33<31:35, 3.81s/it] 4%|▍ | 23/520 [01:37<31:34, 3.81s/it] {'loss': 1.4566, 'grad_norm': 0.002744630830615553, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:37<31:34, 3.81s/it] 5%|▍ | 24/520 [01:40<31:39, 3.83s/it] {'loss': 1.4108, 'grad_norm': 0.002813657376694583, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 24/520 [01:40<31:39, 3.83s/it] 5%|▍ | 25/520 [01:44<31:43, 3.84s/it] {'loss': 1.4567, 'grad_norm': 0.003055599584845224, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:44<31:43, 3.84s/it] 5%|▌ | 26/520 [01:48<31:44, 3.86s/it] {'loss': 1.4517, 'grad_norm': 0.0025067944188097184, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:48<31:44, 3.86s/it] 5%|▌ | 27/520 [01:52<31:39, 3.85s/it] {'loss': 1.3732, 'grad_norm': 0.0030723577887423565, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:52<31:39, 3.85s/it] 5%|▌ | 28/520 [01:56<31:40, 3.86s/it] {'loss': 1.3779, 'grad_norm': 0.0029055764762655916, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:56<31:40, 3.86s/it] 6%|▌ | 29/520 [02:00<31:43, 3.88s/it] {'loss': 1.3927, 'grad_norm': 0.0028010695705970156, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 29/520 [02:00<31:43, 3.88s/it] 6%|▌ | 30/520 [02:04<31:27, 3.85s/it] {'loss': 1.5006, 'grad_norm': 0.0028552235732370373, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:04<31:27, 3.85s/it] 6%|▌ | 31/520 [02:07<30:55, 3.79s/it] {'loss': 1.3721, 'grad_norm': 0.0025425699819593946,
'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<30:55, 3.79s/it] 6%|▌ | 32/520 [02:11<30:34, 3.76s/it] {'loss': 1.3718, 'grad_norm': 0.003301888676646615, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:11<30:34, 3.76s/it] 6%|▋ | 33/520 [02:15<30:15, 3.73s/it] {'loss': 1.3956, 'grad_norm': 0.003187700562540326, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:15<30:15, 3.73s/it] 7%|▋ | 34/520 [02:18<30:04, 3.71s/it] {'loss': 1.3757, 'grad_norm': 0.0030113010635178165, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:18<30:04, 3.71s/it] 7%|▋ | 35/520 [02:22<29:52, 3.70s/it] {'loss': 1.384, 'grad_norm': 0.002800497869582189, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:22<29:52, 3.70s/it] 7%|▋ | 36/520 [02:26<29:38, 3.67s/it] {'loss': 1.504, 'grad_norm': 0.0027821416973315334, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:26<29:38, 3.67s/it] 7%|▋ | 37/520 [02:29<29:31, 3.67s/it] {'loss': 1.4831, 'grad_norm': 0.002475955016841264, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:29<29:31, 3.67s/it] 7%|▋ | 38/520 [02:33<29:23, 3.66s/it] {'loss': 1.5787, 'grad_norm': 0.00271507020797863, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:33<29:23, 3.66s/it] 8%|▊ | 39/520 [02:37<29:19, 3.66s/it] {'loss': 1.4151, 'grad_norm': 0.0030041993464000214, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:37<29:19, 3.66s/it] 8%|▊ | 40/520 [02:40<29:10, 3.65s/it] {'loss': 1.4538, 'grad_norm': 0.0024019148948412187, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:40<29:10, 3.65s/it] 8%|▊ | 41/520 [02:44<29:06, 3.65s/it] {'loss': 1.4223, 'grad_norm': 0.0028430463589571154, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:44<29:06, 3.65s/it] 8%|▊ | 42/520 [02:47<28:56, 3.63s/it] {'loss': 1.4476, 'grad_norm': 0.0033940644869797274, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:47<28:56, 3.63s/it] 8%|▊ | 43/520 [02:51<28:55, 3.64s/it] {'loss': 1.4016, 'grad_norm': 0.0035925563157347016, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:51<28:55, 3.64s/it] 8%|▊ | 44/520 [02:55<28:52, 3.64s/it] {'loss': 1.4914, 'grad_norm': 0.0026264382574149206, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:55<28:52, 3.64s/it] 9%|▊ | 45/520 [02:58<28:49, 3.64s/it] {'loss': 1.4591, 'grad_norm': 0.002683211319764426, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:58<28:49, 3.64s/it] 9%|▉ | 46/520 [03:02<28:46, 3.64s/it] {'loss': 1.565, 'grad_norm': 0.0027241097926793143, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:02<28:46, 3.64s/it] 9%|▉ | 47/520 [03:06<28:40, 3.64s/it] {'loss': 1.4428, 'grad_norm': 0.0025015885738279937, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:06<28:40, 3.64s/it] 9%|▉ | 48/520 [03:09<28:33, 3.63s/it] {'loss': 1.4093, 'grad_norm': 0.0029370966734930046, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<28:33, 3.63s/it] 9%|▉ | 49/520 [03:13<28:46, 3.67s/it] {'loss': 1.4613, 'grad_norm': 0.0025682075162119437, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:13<28:46, 3.67s/it] 10%|▉ | 50/520 [03:17<29:09, 3.72s/it] {'loss': 1.4502, 'grad_norm': 0.0024960752555607, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 
[03:17<29:09, 3.72s/it] 10%|▉ | 51/520 [03:21<28:56, 3.70s/it] {'loss': 1.3748, 'grad_norm': 0.002816854564837006, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:21<28:56, 3.70s/it] 10%|█ | 52/520 [03:24<28:42, 3.68s/it] {'loss': 1.507, 'grad_norm': 0.0026101923107874625, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:24<28:42, 3.68s/it] 10%|█ | 53/520 [03:28<28:30, 3.66s/it] {'loss': 1.5028, 'grad_norm': 0.0027646598510270006, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:28<28:30, 3.66s/it] 10%|█ | 54/520 [03:31<28:19, 3.65s/it] {'loss': 1.3989, 'grad_norm': 0.002375376838643182, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<28:19, 3.65s/it] 11%|█ | 55/520 [03:35<28:14, 3.64s/it] {'loss': 1.3871, 'grad_norm': 0.002537493145379656, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:35<28:14, 3.64s/it] 11%|█ | 56/520 [03:39<28:18, 3.66s/it] {'loss': 1.5141, 'grad_norm': 0.0025207634856254197, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:39<28:18, 3.66s/it] 11%|█ | 57/520 [03:42<28:19, 3.67s/it] {'loss': 1.3719, 'grad_norm': 0.0031753337474815365, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<28:19, 3.67s/it] 11%|█ | 58/520 [03:46<28:08, 3.66s/it] {'loss': 1.5335, 'grad_norm': 0.0019763232451755586, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:46<28:08, 3.66s/it] 11%|█▏ | 59/520 [03:50<28:07, 3.66s/it] {'loss': 1.3657, 'grad_norm': 0.0025586344100365236, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:50<28:07, 3.66s/it] 12%|█▏ | 60/520 [03:53<27:55, 3.64s/it] {'loss': 1.4619, 'grad_norm': 0.003219037365566865, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:53<27:55, 3.64s/it] 12%|█▏ | 61/520 [03:57<27:50, 3.64s/it] {'loss': 1.4912, 'grad_norm': 0.0034946622258303865, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:57<27:50, 3.64s/it] 12%|█▏ | 62/520 [04:01<27:45, 3.64s/it] {'loss': 1.4328, 'grad_norm': 0.0026305275318732127, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:01<27:45, 3.64s/it] 12%|█▏ | 63/520 [04:04<27:39, 3.63s/it] {'loss': 1.4107, 'grad_norm': 0.002856674753413912, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:04<27:39, 3.63s/it] 12%|█▏ | 64/520 [04:08<27:31, 3.62s/it] {'loss': 1.464, 'grad_norm': 0.0024829672287541645, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:08<27:31, 3.62s/it] 12%|█▎ | 65/520 [04:11<27:35, 3.64s/it] {'loss': 1.4494, 'grad_norm': 0.0025565192596521207, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:11<27:35, 3.64s/it] 13%|█▎ | 66/520 [04:15<27:33, 3.64s/it] {'loss': 1.417, 'grad_norm': 0.003001393583908569, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:15<27:33, 3.64s/it] 13%|█▎ | 67/520 [04:19<27:33, 3.65s/it] {'loss': 1.3051, 'grad_norm': 0.0021485996931497296, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:19<27:33, 3.65s/it] 13%|█▎ | 68/520 [04:22<27:34, 3.66s/it] {'loss': 1.3659, 'grad_norm': 0.0022228401551197285, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:22<27:34, 3.66s/it] 13%|█▎ | 69/520 [04:26<27:27, 3.65s/it] {'loss': 1.3425, 'grad_norm': 0.0030842978039204473, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:26<27:27, 
3.65s/it] 13%|█▎ | 70/520 [04:30<27:33, 3.67s/it] {'loss': 1.3919, 'grad_norm': 0.0026180165970355333, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:30<27:33, 3.67s/it] 14%|█▎ | 71/520 [04:34<27:29, 3.67s/it] {'loss': 1.3125, 'grad_norm': 0.0022132994870580807, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:34<27:29, 3.67s/it] 14%|█▍ | 72/520 [04:37<27:20, 3.66s/it] {'loss': 1.4632, 'grad_norm': 0.002712484977588332, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:37<27:20, 3.66s/it] 14%|█▍ | 73/520 [04:41<27:22, 3.68s/it] {'loss': 1.2917, 'grad_norm': 0.0022787297804940662, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:41<27:22, 3.68s/it] 14%|█▍ | 74/520 [04:45<27:17, 3.67s/it] {'loss': 1.4093, 'grad_norm': 0.0024647144646780545, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:45<27:17, 3.67s/it] 14%|█▍ | 75/520 [04:48<27:10, 3.66s/it] {'loss': 1.3163, 'grad_norm': 0.002441223784435101, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:48<27:10, 3.66s/it] 15%|█▍ | 76/520 [04:52<27:10, 3.67s/it] {'loss': 1.539, 'grad_norm': 0.003294189930559694, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:52<27:10, 3.67s/it] 15%|█▍ | 77/520 [04:56<27:27, 3.72s/it] {'loss': 1.232, 'grad_norm': 0.0024181096877143, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:56<27:27, 3.72s/it] 15%|█▌ | 78/520 [05:00<27:48, 3.77s/it] {'loss': 1.3677, 'grad_norm': 0.002595047787449542, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:00<27:48, 3.77s/it] 15%|█▌ | 79/520 [05:03<27:53, 3.79s/it] {'loss': 1.3477, 'grad_norm': 0.0025088093314904842, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:03<27:53, 3.79s/it] 15%|█▌ | 80/520 [05:07<27:57, 3.81s/it] {'loss': 1.5211, 'grad_norm': 0.0027886773804302817, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:07<27:57, 3.81s/it] 16%|█▌ | 81/520 [05:11<27:56, 3.82s/it] {'loss': 1.5011, 'grad_norm': 0.003481034335154321, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:11<27:56, 3.82s/it] 16%|█▌ | 82/520 [05:15<27:59, 3.84s/it] {'loss': 1.4348, 'grad_norm': 0.0023810124948324676, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:15<27:59, 3.84s/it] 16%|█▌ | 83/520 [05:19<27:56, 3.84s/it] {'loss': 1.4377, 'grad_norm': 0.0023352234015946924, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:19<27:56, 3.84s/it] 16%|█▌ | 84/520 [05:23<27:55, 3.84s/it] {'loss': 1.4623, 'grad_norm': 0.002856987942573144, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:23<27:55, 3.84s/it] 16%|█▋ | 85/520 [05:27<27:52, 3.84s/it] {'loss': 1.4694, 'grad_norm': 0.002282735502342565, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:27<27:52, 3.84s/it] 17%|█▋ | 86/520 [05:30<27:52, 3.85s/it] {'loss': 1.4893, 'grad_norm': 0.002480337700696306, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:30<27:52, 3.85s/it] 17%|█▋ | 87/520 [05:34<27:38, 3.83s/it] {'loss': 1.4641, 'grad_norm': 0.002258738262257446, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:34<27:38, 3.83s/it] 17%|█▋ | 88/520 [05:38<27:12, 3.78s/it] {'loss': 1.4303, 'grad_norm': 0.0024309958331379324, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:38<27:12, 
3.78s/it] 17%|█▋ | 89/520 [05:41<26:50, 3.74s/it] {'loss': 1.4392, 'grad_norm': 0.0025354156691821833, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+ [... tqdm progress-bar stream, steps 90-347/520, one {'loss', 'grad_norm', 'learning_rate', 'epoch'} record per step: loss drifts from 1.3739 at step 90 to 1.1709 at step 347, grad_norm stays within roughly 0.0013-0.0030, learning_rate decays from 0.18955 to 0.05272, epoch 0.17 -> 0.67; duplicate progress-bar redraws omitted ...]
+ Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ [... tqdm progress-bar stream, steps 348-429/520: loss 1.1309 at step 348 to 1.173 at step 429, learning_rate 0.05217 -> 0.01566, epoch 0.67 -> 0.82; duplicate progress-bar redraws omitted ...]
+ Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ [... tqdm progress-bar stream, steps 430-463/520: loss 1.1733 at step 430 to 1.0777 at step 463, learning_rate 0.01533 -> 0.00625, epoch 0.83 -> 0.89; duplicate progress-bar redraws omitted ...]
+ 89%|████████▉ | 463/520 [29:07<03:28, 3.66s/it] 89%|████████▉ | 464/520 [29:10<03:24, 3.66s/it] {'loss': 1.2195,
'grad_norm': 0.0016810133478936666, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:10<03:24, 3.66s/it] 89%|████████▉ | 465/520 [29:14<03:20, 3.65s/it] {'loss': 1.325, 'grad_norm': 0.0017782755002260582, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:14<03:20, 3.65s/it] 90%|████████▉ | 466/520 [29:17<03:17, 3.65s/it] {'loss': 1.2014, 'grad_norm': 0.0014656046160945433, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:17<03:17, 3.65s/it] 90%|████████▉ | 467/520 [29:21<03:14, 3.67s/it] {'loss': 1.1975, 'grad_norm': 0.0015551682786066371, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:21<03:14, 3.67s/it] 90%|█████████ | 468/520 [29:25<03:10, 3.67s/it] {'loss': 1.1809, 'grad_norm': 0.0018930389065242943, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:25<03:10, 3.67s/it] 90%|█████████ | 469/520 [29:29<03:07, 3.68s/it] {'loss': 1.2407, 'grad_norm': 0.001799383641692955, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:29<03:07, 3.68s/it] 90%|█████████ | 470/520 [29:32<03:03, 3.67s/it] {'loss': 1.1198, 'grad_norm': 0.0014818708169123075, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:32<03:03, 3.67s/it] 91%|█████████ | 471/520 [29:36<02:59, 3.67s/it] {'loss': 1.1442, 'grad_norm': 0.0017488959968762542, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:36<02:59, 3.67s/it] 91%|█████████ | 472/520 [29:40<02:56, 3.67s/it] {'loss': 1.1136, 'grad_norm': 0.0016578903771245401, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:40<02:56, 3.67s/it] 91%|█████████ | 473/520 [29:43<02:51, 3.66s/it] {'loss': 1.1699, 'grad_norm': 0.0016425978224233317, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:43<02:51, 3.66s/it] 91%|█████████ | 474/520 [29:47<02:47, 3.65s/it] {'loss': 1.2306, 'grad_norm': 0.001555097603817431, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:47<02:47, 3.65s/it] 91%|█████████▏| 475/520 [29:50<02:45, 3.67s/it] {'loss': 1.1508, 'grad_norm': 0.0015852008857769441, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:51<02:45, 3.67s/it] 92%|█████████▏| 476/520 [29:54<02:40, 3.66s/it] {'loss': 1.1623, 'grad_norm': 0.0016671923260056787, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:54<02:40, 3.66s/it] 92%|█████████▏| 477/520 [29:58<02:37, 3.65s/it] {'loss': 1.1488, 'grad_norm': 0.0018068005829736569, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:58<02:37, 3.65s/it] 92%|█████████▏| 478/520 [30:01<02:33, 3.66s/it] {'loss': 1.1091, 'grad_norm': 0.0017134554043945194, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:01<02:33, 3.66s/it] 92%|█████████▏| 479/520 [30:05<02:29, 3.66s/it] {'loss': 1.1953, 'grad_norm': 0.0017932220525582424, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:05<02:29, 3.66s/it] 92%|█████████▏| 480/520 [30:09<02:26, 3.65s/it] {'loss': 1.2233, 'grad_norm': 0.0015977242672534431, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:09<02:26, 3.65s/it] 92%|█████████▎| 481/520 [30:12<02:22, 3.66s/it] {'loss': 1.2153, 'grad_norm': 0.0014925576866687904, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:12<02:22, 3.66s/it] 93%|█████████▎| 482/520 [30:16<02:18, 3.66s/it] {'loss': 1.2297, 'grad_norm': 0.0018649493938581562, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:16<02:18, 3.66s/it] 93%|█████████▎| 483/520 [30:20<02:15, 3.66s/it] {'loss': 1.1799, 'grad_norm': 0.001742278718347871, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:20<02:15, 3.66s/it] 93%|█████████▎| 484/520 [30:23<02:11, 3.65s/it] {'loss': 1.1772, 'grad_norm': 0.001633192055176589, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:23<02:11, 3.65s/it] 93%|█████████▎| 485/520 [30:27<02:08, 3.66s/it] {'loss': 1.1344, 'grad_norm': 0.001555706834688869, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:27<02:08, 3.66s/it] 93%|█████████▎| 486/520 [30:31<02:04, 3.65s/it] {'loss': 1.2558, 'grad_norm': 0.0016553174691554132, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:31<02:04, 3.65s/it] 94%|█████████▎| 487/520 [30:34<02:00, 3.65s/it] {'loss': 1.1084, 'grad_norm': 0.001589086619041877, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:34<02:00, 3.65s/it] 94%|█████████▍| 488/520 [30:38<01:56, 3.64s/it] {'loss': 1.0539, 'grad_norm': 0.0016357470683410066, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:38<01:56, 3.64s/it] 94%|█████████▍| 489/520 [30:42<01:53, 3.65s/it] {'loss': 1.2245, 'grad_norm': 0.001472515571649712, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:42<01:53, 3.65s/it] 94%|█████████▍| 490/520 [30:45<01:49, 3.64s/it] {'loss': 1.177, 'grad_norm': 0.0017012417329146815, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:45<01:49, 3.64s/it] 94%|█████████▍| 491/520 [30:49<01:45, 3.64s/it] {'loss': 1.1369, 'grad_norm': 0.001709490220557341, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:49<01:45, 3.64s/it] 95%|█████████▍| 492/520 [30:53<01:41, 3.64s/it] {'loss': 1.2519, 'grad_norm': 0.0017173343584238612, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:53<01:41, 3.64s/it] 95%|█████████▍| 493/520 [30:56<01:38, 3.64s/it] {'loss': 1.2424, 'grad_norm': 0.0016784976274621599, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:56<01:38, 3.64s/it] 95%|█████████▌| 494/520 [31:00<01:34, 3.65s/it] {'loss': 1.194, 'grad_norm': 0.0015358453843140464, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:00<01:34, 3.65s/it] 95%|█████████▌| 495/520 [31:03<01:30, 3.63s/it] {'loss': 1.1514, 'grad_norm': 0.0016029869129127565, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:03<01:30, 3.63s/it] 95%|█████████▌| 496/520 [31:07<01:27, 3.64s/it] {'loss': 1.0714, 'grad_norm': 0.0017094384739518747, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:07<01:27, 3.64s/it] 96%|█████████▌| 497/520 [31:11<01:23, 3.65s/it] {'loss': 1.1564, 'grad_norm': 0.0014188185591408546, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:11<01:23, 3.65s/it] 96%|█████████▌| 498/520 [31:14<01:20, 3.64s/it] {'loss': 1.1574, 'grad_norm': 0.0017447960034220513, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:14<01:20, 
3.64s/it] 96%|█████████▌| 499/520 [31:18<01:16, 3.65s/it] {'loss': 1.2977, 'grad_norm': 0.0016768910790561108, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:18<01:16, 3.65s/it] 96%|█████████▌| 500/520 [31:22<01:12, 3.64s/it] {'loss': 1.2709, 'grad_norm': 0.002133980950454975, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:22<01:12, 3.64s/it] 96%|█████████▋| 501/520 [31:25<01:09, 3.64s/it] {'loss': 1.2042, 'grad_norm': 0.0017491335947608145, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:25<01:09, 3.64s/it] 97%|█████████▋| 502/520 [31:29<01:05, 3.63s/it] {'loss': 1.1924, 'grad_norm': 0.0015682406715703178, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:29<01:05, 3.63s/it] 97%|█████████▋| 503/520 [31:33<01:01, 3.64s/it] {'loss': 1.1881, 'grad_norm': 0.0016392211231149461, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:33<01:01, 3.64s/it] 97%|█████████▋| 504/520 [31:36<00:58, 3.64s/it] {'loss': 1.1896, 'grad_norm': 0.0018223272584551888, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:36<00:58, 3.64s/it] 97%|█████████▋| 505/520 [31:40<00:54, 3.64s/it] {'loss': 1.2212, 'grad_norm': 0.0016864792861672022, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:40<00:54, 3.64s/it] 97%|█████████▋| 506/520 [31:43<00:50, 3.63s/it] {'loss': 1.1433, 'grad_norm': 0.0017109346531580904, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:43<00:50, 3.63s/it] 98%|█████████▊| 507/520 [31:47<00:47, 3.64s/it] {'loss': 1.3437, 'grad_norm': 0.001571419308734898, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:47<00:47, 3.64s/it] 98%|█████████▊| 508/520 [31:51<00:43, 3.64s/it] {'loss': 1.2639, 'grad_norm': 0.0016677426691566874, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:51<00:43, 3.64s/it] 98%|█████████▊| 509/520 [31:54<00:40, 3.64s/it] {'loss': 1.2239, 'grad_norm': 0.0015598831506800459, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:54<00:40, 3.64s/it] 98%|█████████▊| 510/520 [31:58<00:36, 3.66s/it] {'loss': 1.1832, 'grad_norm': 0.001596772143606804, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:58<00:36, 3.66s/it] 98%|█████████▊| 511/520 [32:02<00:32, 3.65s/it] {'loss': 1.157, 'grad_norm': 0.001569582738961551, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:02<00:32, 3.65s/it] 98%|█████████▊| 512/520 [32:05<00:29, 3.68s/it] {'loss': 1.0414, 'grad_norm': 0.0016692872279000974, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:05<00:29, 3.68s/it] 99%|█████████▊| 513/520 [32:09<00:25, 3.67s/it] {'loss': 1.2453, 'grad_norm': 0.0018327444459812839, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:09<00:25, 3.67s/it] 99%|█████████▉| 514/520 [32:13<00:22, 3.67s/it] {'loss': 1.2085, 'grad_norm': 0.001528422716491307, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:13<00:22, 3.67s/it] 99%|█████████▉| 515/520 [32:16<00:18, 3.66s/it] {'loss': 1.2629, 'grad_norm': 0.001859632754553769, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:16<00:18, 3.66s/it] 99%|█████████▉| 516/520 [32:20<00:14, 
3.66s/it] {'loss': 1.1592, 'grad_norm': 0.0016081373824005788, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:20<00:14, 3.66s/it] 99%|█████████▉| 517/520 [32:24<00:10, 3.64s/it] {'loss': 1.2356, 'grad_norm': 0.0015865132222837675, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:24<00:10, 3.64s/it] 100%|█████████▉| 518/520 [32:27<00:07, 3.63s/it] {'loss': 1.1792, 'grad_norm': 0.0017046381961448138, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:27<00:07, 3.63s/it] 100%|█████████▉| 519/520 [32:31<00:03, 3.62s/it] {'loss': 1.199, 'grad_norm': 0.001622184294838896, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:31<00:03, 3.62s/it] 100%|██████████| 520/520 [32:35<00:00, 3.89s/it] {'loss': 1.2183, 'grad_norm': 0.0015790765764115932, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:35<00:00, 3.89s/it] {'train_runtime': 1955.9457, 'train_samples_per_second': 34.014, 'train_steps_per_second': 0.266, 'train_loss': 1.293173605432877, 'epoch': 1.0} + 100%|██████████| 520/520 [32:35<00:00, 3.89s/it] 100%|██████████| 520/520 [32:35<00:00, 3.76s/it] +[2025-10-13 07:31:45,690] [INFO] [launch.py:348:main] Process 574582 exits successfully. +[2025-10-13 07:31:46,692] [INFO] [launch.py:348:main] Process 574577 exits successfully. +[2025-10-13 07:31:46,692] [INFO] [launch.py:348:main] Process 574581 exits successfully. +[2025-10-13 07:31:46,693] [INFO] [launch.py:348:main] Process 574580 exits successfully. +[2025-10-13 07:31:46,693] [INFO] [launch.py:348:main] Process 574579 exits successfully. +[2025-10-13 07:31:46,693] [INFO] [launch.py:348:main] Process 574576 exits successfully. +[2025-10-13 07:31:47,695] [INFO] [launch.py:348:main] Process 574578 exits successfully. +[2025-10-13 07:31:50,699] [INFO] [launch.py:348:main] Process 574575 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.3_2e-1_connector-3.0_1.3_2e-1_ablation_20251013_065736.log +Timestamp: 2025-10-13 07:31:53 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log new file mode 100644 index 0000000000000000000000000000000000000000..11991b7aa23f45ed8e460f598248199aa38d7b96 --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log +Timestamp: 2025-10-13 07:31:53 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
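Each {'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...} record above is one optimizer step in the Hugging Face Trainer logging format; the learning_rate column traces the configured cosine schedule (peak 2e-1, warmup_ratio 0.03) down to 0.0 at step 520. A minimal sketch for pulling the loss and learning-rate curves out of a log file of this shape (the path and helper name are illustrative, not part of the run):

import ast
import re

# Matches Trainer step records such as:
# {'loss': 1.2217, 'grad_norm': 0.0016, 'learning_rate': 0.028, 'epoch': 0.76}
RECORD_RE = re.compile(r"\{'loss':[^}]*\}")

def parse_trainer_log(path):
    """Return the list of per-step metric dicts found in a log file."""
    records = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            for match in RECORD_RE.finditer(line):
                records.append(ast.literal_eval(match.group()))
    return records

if __name__ == "__main__":
    # Illustrative path; any of the masktune logs in logs_oct12/ has this shape.
    records = parse_trainer_log("logs_oct12/example.log")
    losses = [r["loss"] for r in records]
    lrs = [r["learning_rate"] for r in records]
    print(f"{len(records)} steps, final loss {losses[-1]}, final lr {lrs[-1]}")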
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log
new file mode 100644
index 0000000000000000000000000000000000000000..11991b7aa23f45ed8e460f598248199aa38d7b96
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log
+Timestamp: 2025-10-13 07:31:53
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-13 07:31:56,064] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:31:58,735] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 07:31:58,736] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 1.5 --temperature_mlp_text 1.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 1.5 --temperature_mlp_vision 1.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 1.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-13 07:32:01,331] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:02,390] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 07:32:02,390] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 07:32:02,390] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 07:32:02,390] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 07:32:02,390] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 07:32:02,390] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 07:32:02,390] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 07:32:02,392] [INFO] [launch.py:253:main] process 594753 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', ...same training arguments as the runner cmd above...]
+[2025-10-13 07:32:02,395] [INFO] [launch.py:253:main] process 594754 spawned with command: [...same arguments, '--local_rank=1'...]
+[2025-10-13 07:32:02,397] [INFO] [launch.py:253:main] process 594755 spawned with command: [...same arguments, '--local_rank=2'...]
+[2025-10-13 07:32:02,399] [INFO] [launch.py:253:main] process 594756 spawned with command: [...same arguments, '--local_rank=3'...]
+[2025-10-13 07:32:02,401] [INFO] [launch.py:253:main] process 594757 spawned with command: [...same arguments, '--local_rank=4'...]
+[2025-10-13 07:32:02,403] [INFO] [launch.py:253:main] process 594758 spawned with command: [...same arguments, '--local_rank=5'...]
+[2025-10-13 07:32:02,405] [INFO] [launch.py:253:main] process 594759 spawned with command: [...same arguments, '--local_rank=6'...]
+[2025-10-13 07:32:02,408] [INFO] [launch.py:253:main] process 594760 spawned with command: [...same arguments, '--local_rank=7'...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-13 07:32:09,019] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,242] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,309] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,346] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,346] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,372] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,372] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,375] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 07:32:09,431] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,647] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,710] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,752] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,753] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,753] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 07:32:09,779] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,781] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 07:32:09,786] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+  warnings.warn(
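The "Apply masks" line means every targeted linear layer in the LLM and the connector is wrapped with a learnable mask. Given the logged settings (mask_type soft, temperature 1.5, --init_mean 3.0), a soft mask of this kind plausibly gates each weight with a tempered sigmoid of a learned per-weight score. The sketch below is an illustrative reading of that mechanism under those assumptions, not the tinyllava implementation itself; all names are hypothetical:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Linear layer whose weights are gated by a learned soft mask.

    Illustrative only: each weight w is scaled by sigmoid(s / T), where the
    per-weight score s starts at init_mean (3.0 here) and T is the logged
    temperature (1.5), so gates start mostly open: sigmoid(3.0/1.5) ~= 0.88.
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=3.0, temperature=1.5):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Under this reading, the large learning rate (2e-1) in the launch command is tuning how open each gate is rather than making ordinary weight updates of that magnitude.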
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.5, 'temperature_mlp': 1.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
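For reference, connector_type 'mlp2x_gelu' in this dict is the LLaVA-style two-layer GELU projector from the vision hidden size (1152 for SigLIP-so400m) to the LLM hidden size (896 for Qwen2.5-0.5B). A minimal sketch of that shape (the builder name is illustrative; the soft connector mask would wrap these linears):

import torch.nn as nn

def build_mlp2x_gelu(vision_hidden_size=1152, llm_hidden_size=896):
    # Two linear layers with a GELU in between: project each vision patch
    # token into the LLM embedding space, then mix once more at that width.
    return nn.Sequential(
        nn.Linear(vision_hidden_size, llm_hidden_size),
        nn.GELU(),
        nn.Linear(llm_hidden_size, llm_hidden_size),
    )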
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.5,
+    "temperature_mlp": 1.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:594753:594753 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594753:594753 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594753:594753 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594753:594753 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594753:594753 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594753:594753 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:594757:594757 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594757:594757 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594757:594757 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594757:594757 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594757:594757 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594757:594757 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594759:594759 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594759:594759 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594759:594759 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594759:594759 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594759:594759 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594759:594759 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594756:594756 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594756:594756 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594758:594758 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594756:594756 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594758:594758 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594758:594758 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594756:594756 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594756:594756 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594756:594756 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594758:594758 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594758:594758 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594758:594758 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO Using network Socket
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:594760:594760 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594760:594760 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594760:594760 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594760:594760 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594760:594760 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594760:594760 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO Using network Socket
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:594755:594755 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594755:594755 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594755:594755 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594755:594755 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594755:594755 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594755:594755 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO Using network Socket
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:594754:594754 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:594754:594754 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594754:594754 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594754:594754 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:594754:594754 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:594754:594754 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO ncclCommInitRank comm 0x55f8dd990240 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO ncclCommInitRank comm 0x5630e3d67e60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO ncclCommInitRank comm 0x561366a03000 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO ncclCommInitRank comm 0x56284a730c50 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO ncclCommInitRank comm 0x559dcdcd5920 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO ncclCommInitRank comm 0x561b0ca08650 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO ncclCommInitRank comm 0x562d88395690 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO ncclCommInitRank comm 0x564212c5e5a0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5200377a723d74ac - Init START
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO NVLS multicast support is not available on dev 1
[... matching "Setting affinity" / "NVLS multicast support is not available" pairs follow for GPUs 0 and 2-7 ...]
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO comm 0x561b0ca08650 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
[... matching "comm ... nRanks 8 nNodes 1 localRanks 8 localRank N MNNVL 0" lines for ranks 1-7 ...]
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
[... rank 0 enumerates Channel 01/24 through Channel 23/24, all with the same ring order 0 1 2 3 4 5 6 7 ...]
[... one "Trees" line per rank, each mapping the chain 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 onto all 24 channels (e.g. "[0] 2/-1/-1->1->0" repeated for channels 0-23 on rank 1) ...]
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO P2P Chunksize set to 524288
[... "P2P Chunksize set to 524288" repeated for ranks 1-7 ...]
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
[... per-channel connection lines (Channel 00/0 through Channel 23/0) for every forward ring edge 0[0] -> 1[1], 1[1] -> 2[2], 2[2] -> 3[3], 3[3] -> 4[4], 4[4] -> 5[5], 5[5] -> 6[6], 6[6] -> 7[7] and 7[7] -> 0[0], all via P2P/CUMEM/read ...]
+ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO Connected all rings
[... per-channel connection lines (Channel 00/0 through Channel 23/0) for the reverse tree edges 7[7] -> 6[6], 6[6] -> 5[5], 5[5] -> 4[4], 4[4] -> 3[3], 3[3] -> 2[2], 2[2] -> 1[1] and 1[1] -> 0[0], all via P2P/CUMEM/read ...]
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
[... identical "Connected all trees" / "threadThresholds" / channel-count lines for ranks 1-5 and 7 ...]
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO 24 coll channels, 24 collnet
channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594760:596353 [7] NCCL INFO ncclCommInitRank comm 0x559dcdcd5920 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594757:596349 [4] NCCL INFO ncclCommInitRank comm 0x562d88395690 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594758:596351 [5] NCCL INFO ncclCommInitRank comm 0x5630e3d67e60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594759:596350 [6] NCCL INFO ncclCommInitRank comm 0x561366a03000 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:594754:596383 [1] NCCL INFO ncclCommInitRank comm 0x55f8dd990240 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:594756:596352 [3] NCCL INFO ncclCommInitRank comm 0x56284a730c50 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594753:596348 [0] NCCL INFO ncclCommInitRank comm 0x561b0ca08650 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5200377a723d74ac - Init COMPLETE +ywang29-vrdb-test1-worker-0:594755:596366 [2] NCCL INFO ncclCommInitRank comm 0x564212c5e5a0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5200377a723d74ac - Init COMPLETE +[2025-10-13 07:32:47,733] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 
'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 
'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
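The warning above is expected for this setup rather than an error: every attention and MLP projection carries an extra `scores` tensor (a learnable mask parameter), and the base pretrained checkpoint contains no such keys, so they are freshly initialized at load time. A minimal sketch of how a soft-masked linear layer might register and apply such a `scores` parameter (illustrative class name, sizes, and defaults, not the repository's actual implementation):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Sketch of a soft-masked linear layer (hypothetical class).

    A learnable `scores` tensor, one entry per weight, is squashed
    through a temperature-scaled sigmoid and multiplied onto the
    weights. Because `scores` is registered here but absent from a
    vanilla checkpoint's state_dict, loading base weights reports it
    as "newly initialized", matching the warning in this log.
    """

    def __init__(self, in_features: int, out_features: int,
                 bias: bool = True, temperature: float = 1.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # Extra per-weight mask parameter; not in base checkpoints.
        self.scores = nn.Parameter(torch.zeros_like(self.weight))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        soft_mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * soft_mask, self.bias)

# Loading plain linear weights leaves `scores` freshly initialized,
# which is what triggers the warning:
layer = SoftMaskedLinear(1024, 1024)
state = {"weight": torch.randn(1024, 1024), "bias": torch.zeros(1024)}
result = layer.load_state_dict(state, strict=False)
print(result.missing_keys)  # ['scores']
```

Each of the 8 ranks loads the checkpoint independently under DeepSpeed, which appears to be why the same message recurs verbatim below.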
+[the same warning and training notice are emitted verbatim by each of the remaining ranks; six duplicate copies elided]
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores',
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-13 07:32:49,487] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
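The SupermaskLinearSparsity_SoftForward_Normal modules in the dump below are the custom masked linear layers this run tunes (mask_model = ["llm", "connector"], mask type "soft", temperature 1.3 in the config above); their implementation is not part of this log. As orientation only, here is a minimal hypothetical sketch of such a soft-forward supermask layer, assuming a frozen pretrained weight and a sigmoid(scores / temperature) soft mask; the class role, the init constant 3.0, and the temperature come from this log, everything else is an assumption:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Linear):
        """Hypothetical sketch of a soft-forward supermask linear layer.

        Only the per-weight `scores` tensor is trained; the pretrained
        weight stays frozen. These `scores` are exactly the tensors the
        "newly initialized" warnings above complain about.
        """

        def __init__(self, in_features, out_features, bias=True, temperature=1.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # Matches the "Pre-training init ... Mean=3.000000" lines below.
            self.scores = nn.Parameter(torch.full_like(self.weight, 3.0))
            self.weight.requires_grad = False

        def forward(self, x):
            # Soft mask in (0, 1); sigmoid(3.0 / 1.3) is about 0.91, so
            # training starts close to the dense pretrained behaviour.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)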
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-13 07:33:07,317 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-13 07:33:07,322 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
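The listing finishes with layers 19-23 and the two connector projections just below. Every count is the element count of the corresponding weight matrix (hidden_size 896, intermediate_size 4864, and the 128-dim k/v projections from the config above), and together they reproduce the reported total exactly. A quick arithmetic spot-check (plain Python written for this note, not code from the repo):

    hidden, inter, kv = 896, 4864, 128

    per_layer = (
        2 * hidden * hidden   # q_proj, o_proj: 802816 each
        + 2 * hidden * kv     # k_proj, v_proj: 114688 each
        + 3 * hidden * inter  # gate/up/down_proj: 4358144 each
    )                         # = 14909440 score entries per decoder layer
    connector = 1152 * 896 + 896 * 896  # connector._connector.{0,2}.scores

    assert 24 * per_layer + connector == 359_661_568  # "Total Trainable Parameters"
    assert int(665_298 * 0.10) == 66_529              # "Randomly sampled ..." line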
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:594757:601302 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:594756:601300 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10]
4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:594757:601302 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594755:601301 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:594759:601297 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:594756:601300 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594754:601299 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:594755:601301 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594759:601297 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594754:601299 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594758:601296 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594758:601296 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594760:601298 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594760:601298 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:594755:601301 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594757:601302 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594754:601299 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594759:601297 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594755:601301 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594757:601302 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594754:601299 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594756:601300 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594759:601297 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:594755:601301 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
[... several hundred similar NCCL INFO lines elided: each of the 8 local ranks (GPUs 0-7, pids 594753-594760) connects channels 00/0 through 23/0 forward around the ring (r -> r+1, with 7 -> 0) and then backward (r -> r-1) via P2P/CUMEM/read, and every rank logs "Connected all rings" followed by "Connected all trees" ...]
+ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
[... ranks 1-7 report identical threadThresholds and channel counts ...]
+ywang29-vrdb-test1-worker-0:594753:601295 [0] NCCL INFO ncclCommInitRank comm 0x7fbc6c06ac80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594757:601302 [4] NCCL INFO ncclCommInitRank comm 0x7fe4d406ae10 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594759:601297 [6] NCCL INFO ncclCommInitRank comm 0x7fc3e006a710 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594755:601301 [2] NCCL INFO ncclCommInitRank comm 0x7fcb3806ade0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594760:601298 [7] NCCL INFO ncclCommInitRank comm 0x7f56f806a9e0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594756:601300 [3] NCCL INFO ncclCommInitRank comm 0x7f4c4c06b2f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594758:601296 [5] NCCL INFO ncclCommInitRank comm 0x7f16a006af00 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:594754:601299 [1] NCCL INFO ncclCommInitRank comm 0x7f589006ab70 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc1ca70e66eef0fdd - Init COMPLETE
+ 0%| | 1/520 [00:14<2:03:09, 14.24s/it] {'loss': 2.2867, 'grad_norm': 0.045378101277949424, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:03:09, 14.24s/it] 0%| | 2/520 [00:18<1:10:13, 8.14s/it] {'loss': 2.2393, 'grad_norm': 0.04697132189897136, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:10:13, 8.14s/it] 1%| | 3/520 [00:21<53:14, 6.18s/it] {'loss': 1.792, 'grad_norm': 0.020763107716589704, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<53:14, 6.18s/it] 1%| | 4/520 [00:25<45:15, 5.26s/it] {'loss': 1.638, 'grad_norm': 0.008500646175004328, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<45:15, 5.26s/it] 1%| | 5/520 [00:29<40:50, 4.76s/it] {'loss': 1.6977, 'grad_norm': 0.01625219017795076, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<40:50, 4.76s/it] 1%| | 6/520 [00:33<38:14, 4.46s/it] {'loss': 1.4743, 'grad_norm': 0.00789375345086673, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:33<38:14, 4.46s/it] 1%|▏ | 7/520 [00:37<36:30, 4.27s/it] {'loss': 1.4541, 'grad_norm': 0.007719990188295674, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:37<36:30, 4.27s/it] 2%|▏ | 8/520 [00:41<36:57, 4.33s/it] {'loss': 1.5047, 'grad_norm': 0.006743651929448017, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:41<36:57, 4.33s/it] 2%|▏ | 9/520
[00:45<35:38, 4.18s/it] {'loss': 1.5781, 'grad_norm': 0.007201721039721101, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<35:38, 4.18s/it] 2%|▏ | 10/520 [00:49<34:39, 4.08s/it] {'loss': 1.3965, 'grad_norm': 0.005125762356592713, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<34:39, 4.08s/it] 2%|▏ | 11/520 [00:53<34:21, 4.05s/it] {'loss': 1.4714, 'grad_norm': 0.00473650269619234, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<34:21, 4.05s/it] 2%|▏ | 12/520 [00:57<33:48, 3.99s/it] {'loss': 1.4038, 'grad_norm': 0.0043375869168087245, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<33:48, 3.99s/it][2025-10-13 07:34:14,347] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<34:47, 4.12s/it] {'loss': 1.4248, 'grad_norm': 0.003505359812509816, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<34:47, 4.12s/it] 3%|▎ | 14/520 [01:05<33:30, 3.97s/it] {'loss': 1.4752, 'grad_norm': 0.004145359787524275, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:05<33:30, 3.97s/it] 3%|▎ | 15/520 [01:09<32:32, 3.87s/it] {'loss': 1.4679, 'grad_norm': 0.0033095655054281186, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:09<32:32, 3.87s/it] 3%|▎ | 16/520 [01:12<31:53, 3.80s/it] {'loss': 1.4244, 'grad_norm': 0.003374028704451827, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:12<31:53, 3.80s/it] 3%|▎ | 17/520 [01:16<31:23, 3.74s/it] {'loss': 1.5255, 'grad_norm': 0.0034781073571818837, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:16<31:23, 3.74s/it] 3%|▎ | 18/520 [01:19<31:02, 3.71s/it] {'loss': 1.3938, 'grad_norm': 0.003184551976515693, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:19<31:02, 3.71s/it] 4%|▎ | 19/520 [01:23<30:47, 3.69s/it] {'loss': 1.4534, 'grad_norm': 0.0037451447541712183, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:23<30:47, 3.69s/it] 4%|▍ | 20/520 [01:27<30:34, 3.67s/it] {'loss': 1.3951, 'grad_norm': 0.0035540055004907784, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:27<30:34, 3.67s/it] 4%|▍ | 21/520 [01:30<30:34, 3.68s/it] {'loss': 1.4462, 'grad_norm': 0.004390206215225588, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:30<30:34, 3.68s/it] 4%|▍ | 22/520 [01:34<30:33, 3.68s/it] {'loss': 1.5501, 'grad_norm': 0.004115756254952855, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:34<30:33, 3.68s/it] 4%|▍ | 23/520 [01:38<30:28, 3.68s/it] {'loss': 1.4852, 'grad_norm': 0.0033785619716500768, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:38<30:28, 3.68s/it] 5%|▍ | 24/520 [01:41<30:16, 3.66s/it] {'loss': 1.4389, 'grad_norm': 0.003358917938104122, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:41<30:16, 3.66s/it] 5%|▍ | 25/520 [01:45<30:09, 3.66s/it] {'loss': 1.484, 'grad_norm': 0.0034162162364184535, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:45<30:09, 3.66s/it] 5%|▌ | 26/520 [01:49<30:08, 3.66s/it] {'loss': 1.4829, 
'grad_norm': 0.002939719670816293, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:49<30:08, 3.66s/it] 5%|▌ | 27/520 [01:52<30:01, 3.65s/it] {'loss': 1.4009, 'grad_norm': 0.0034395539918342065, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:52<30:01, 3.65s/it] 5%|▌ | 28/520 [01:56<29:56, 3.65s/it] {'loss': 1.3928, 'grad_norm': 0.0029404742489057023, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:56<29:56, 3.65s/it] 6%|▌ | 29/520 [02:00<29:52, 3.65s/it] {'loss': 1.4079, 'grad_norm': 0.0033700481915348657, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:00<29:52, 3.65s/it] 6%|▌ | 30/520 [02:03<29:50, 3.65s/it] {'loss': 1.5244, 'grad_norm': 0.0030480303975775357, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:03<29:50, 3.65s/it] 6%|▌ | 31/520 [02:07<29:40, 3.64s/it] {'loss': 1.3934, 'grad_norm': 0.00260924567952519, 'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<29:40, 3.64s/it] 6%|▌ | 32/520 [02:11<29:36, 3.64s/it] {'loss': 1.4034, 'grad_norm': 0.0037806545010756294, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:11<29:36, 3.64s/it] 6%|▋ | 33/520 [02:14<29:38, 3.65s/it] {'loss': 1.4089, 'grad_norm': 0.003101530285778325, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:14<29:38, 3.65s/it] 7%|▋ | 34/520 [02:18<29:33, 3.65s/it] {'loss': 1.3878, 'grad_norm': 0.00306040442230196, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:18<29:33, 3.65s/it] 7%|▋ | 35/520 [02:22<29:26, 3.64s/it] {'loss': 1.4026, 'grad_norm': 0.003137229844780806, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:22<29:26, 3.64s/it] 7%|▋ | 36/520 [02:25<29:21, 3.64s/it] {'loss': 1.5189, 'grad_norm': 0.002863825745893692, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:25<29:21, 3.64s/it] 7%|▋ | 37/520 [02:29<29:13, 3.63s/it] {'loss': 1.5049, 'grad_norm': 0.0028411164670203016, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:29<29:13, 3.63s/it] 7%|▋ | 38/520 [02:32<29:09, 3.63s/it] {'loss': 1.5974, 'grad_norm': 0.003268263828925368, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:32<29:09, 3.63s/it] 8%|▊ | 39/520 [02:36<29:05, 3.63s/it] {'loss': 1.4244, 'grad_norm': 0.003302249662789975, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:36<29:05, 3.63s/it] 8%|▊ | 40/520 [02:40<29:00, 3.63s/it] {'loss': 1.4678, 'grad_norm': 0.0026978723301392775, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:40<29:00, 3.63s/it] 8%|▊ | 41/520 [02:43<29:02, 3.64s/it] {'loss': 1.4412, 'grad_norm': 0.003384878839686392, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:43<29:02, 3.64s/it] 8%|▊ | 42/520 [02:47<28:56, 3.63s/it] {'loss': 1.4644, 'grad_norm': 0.0035427553168805945, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:47<28:56, 3.63s/it] 8%|▊ | 43/520 [02:51<29:01, 3.65s/it] {'loss': 1.4225, 'grad_norm': 0.003132232419872934, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:51<29:01, 3.65s/it] 8%|▊ | 44/520 [02:54<29:06, 3.67s/it] {'loss': 1.5162, 'grad_norm': 0.0028758318994219602, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:54<29:06, 3.67s/it] 9%|▊ | 45/520 [02:58<29:00, 3.67s/it] {'loss': 1.4757, 'grad_norm': 0.0028430318455470076, 'learning_rate': 0.19837062236509015, 
'epoch': 0.09} + 9%|▊ | 45/520 [02:58<29:00, 3.67s/it] 9%|▉ | 46/520 [03:02<28:51, 3.65s/it] {'loss': 1.5933, 'grad_norm': 0.003068377365940242, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:02<28:51, 3.65s/it] 9%|▉ | 47/520 [03:05<28:48, 3.66s/it] {'loss': 1.4581, 'grad_norm': 0.002763992671823621, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<28:48, 3.66s/it] 9%|▉ | 48/520 [03:09<28:40, 3.65s/it] {'loss': 1.4316, 'grad_norm': 0.0030834677912234343, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<28:40, 3.65s/it] 9%|▉ | 49/520 [03:13<28:36, 3.64s/it] {'loss': 1.4749, 'grad_norm': 0.00292219417278853, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:13<28:36, 3.64s/it] 10%|▉ | 50/520 [03:16<28:28, 3.64s/it] {'loss': 1.4603, 'grad_norm': 0.002763802166460018, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:16<28:28, 3.64s/it] 10%|▉ | 51/520 [03:20<28:28, 3.64s/it] {'loss': 1.3835, 'grad_norm': 0.0030584841908596976, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:20<28:28, 3.64s/it] 10%|█ | 52/520 [03:23<28:22, 3.64s/it] {'loss': 1.5208, 'grad_norm': 0.002735861398303265, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:23<28:22, 3.64s/it] 10%|█ | 53/520 [03:27<28:13, 3.63s/it] {'loss': 1.5189, 'grad_norm': 0.0030020663538372293, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:27<28:13, 3.63s/it] 10%|█ | 54/520 [03:31<28:09, 3.63s/it] {'loss': 1.4084, 'grad_norm': 0.0025971637321121925, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<28:09, 3.63s/it] 11%|█ | 55/520 [03:34<28:11, 3.64s/it] {'loss': 1.3979, 'grad_norm': 0.002922053596768616, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:34<28:11, 3.64s/it] 11%|█ | 56/520 [03:38<28:08, 3.64s/it] {'loss': 1.5279, 'grad_norm': 0.0027464188325000945, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:38<28:08, 3.64s/it] 11%|█ | 57/520 [03:42<28:12, 3.66s/it] {'loss': 1.3843, 'grad_norm': 0.003717926700986435, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<28:12, 3.66s/it] 11%|█ | 58/520 [03:45<28:10, 3.66s/it] {'loss': 1.5419, 'grad_norm': 0.0021841779873059426, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:45<28:10, 3.66s/it] 11%|█▏ | 59/520 [03:49<28:10, 3.67s/it] {'loss': 1.3759, 'grad_norm': 0.002666049472470836, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:49<28:10, 3.67s/it] 12%|█▏ | 60/520 [03:53<28:04, 3.66s/it] {'loss': 1.4799, 'grad_norm': 0.003620741595115023, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:53<28:04, 3.66s/it] 12%|█▏ | 61/520 [03:56<28:07, 3.68s/it] {'loss': 1.4946, 'grad_norm': 0.0036635069435368237, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:56<28:07, 3.68s/it] 12%|█▏ | 62/520 [04:00<28:00, 3.67s/it] {'loss': 1.4466, 'grad_norm': 0.002731448857001095, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:00<28:00, 3.67s/it] 12%|█▏ | 63/520 [04:04<27:54, 3.66s/it] {'loss': 1.4274, 'grad_norm': 0.003215021015176617, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:04<27:54, 3.66s/it] 12%|█▏ | 64/520 [04:07<27:50, 3.66s/it] {'loss': 1.4667, 'grad_norm': 0.002596186365224578, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 
[04:07<27:50, 3.66s/it] 12%|█▎ | 65/520 [04:11<27:50, 3.67s/it] {'loss': 1.4666, 'grad_norm': 0.0026441834790114746, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:11<27:50, 3.67s/it] 13%|█▎ | 66/520 [04:15<27:48, 3.68s/it] {'loss': 1.4339, 'grad_norm': 0.0034551049795108445, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:15<27:48, 3.68s/it] 13%|█▎ | 67/520 [04:19<27:57, 3.70s/it] {'loss': 1.3108, 'grad_norm': 0.0023628445433301897, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:19<27:57, 3.70s/it] 13%|█▎ | 68/520 [04:22<27:46, 3.69s/it] {'loss': 1.3753, 'grad_norm': 0.002366510258992572, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:22<27:46, 3.69s/it] 13%|█▎ | 69/520 [04:26<27:35, 3.67s/it] {'loss': 1.3567, 'grad_norm': 0.003336940459396765, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:26<27:35, 3.67s/it] 13%|█▎ | 70/520 [04:29<27:30, 3.67s/it] {'loss': 1.4043, 'grad_norm': 0.002787767681937568, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:29<27:30, 3.67s/it] 14%|█▎ | 71/520 [04:33<27:29, 3.67s/it] {'loss': 1.3332, 'grad_norm': 0.002411556251554849, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:33<27:29, 3.67s/it] 14%|█▍ | 72/520 [04:37<27:18, 3.66s/it] {'loss': 1.4845, 'grad_norm': 0.0029167099283612004, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:37<27:18, 3.66s/it] 14%|█▍ | 73/520 [04:40<27:14, 3.66s/it] {'loss': 1.3072, 'grad_norm': 0.0023452950885406902, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:40<27:14, 3.66s/it] 14%|█▍ | 74/520 [04:44<27:13, 3.66s/it] {'loss': 1.4244, 'grad_norm': 0.0025689970389686265, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:44<27:13, 3.66s/it] 14%|█▍ | 75/520 [04:48<27:00, 3.64s/it] {'loss': 1.3281, 'grad_norm': 0.0025928080128953023, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:48<27:00, 3.64s/it] 15%|█▍ | 76/520 [04:51<27:03, 3.66s/it] {'loss': 1.5576, 'grad_norm': 0.003771637327681654, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:51<27:03, 3.66s/it] 15%|█▍ | 77/520 [04:55<26:57, 3.65s/it] {'loss': 1.2515, 'grad_norm': 0.002589642234271887, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:55<26:57, 3.65s/it] 15%|█▌ | 78/520 [04:59<26:55, 3.65s/it] {'loss': 1.3838, 'grad_norm': 0.0026342452258640136, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [04:59<26:55, 3.65s/it] 15%|█▌ | 79/520 [05:02<26:51, 3.65s/it] {'loss': 1.3613, 'grad_norm': 0.002479993743398484, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:02<26:51, 3.65s/it] 15%|█▌ | 80/520 [05:06<26:52, 3.66s/it] {'loss': 1.5477, 'grad_norm': 0.0029740089112267627, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:06<26:52, 3.66s/it] 16%|█▌ | 81/520 [05:10<26:48, 3.66s/it] {'loss': 1.5217, 'grad_norm': 0.0034527084165741607, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:10<26:48, 3.66s/it] 16%|█▌ | 82/520 [05:13<26:44, 3.66s/it] {'loss': 1.4461, 'grad_norm': 0.002518555927049685, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:13<26:44, 3.66s/it] 16%|█▌ | 83/520 [05:17<26:39, 3.66s/it] {'loss': 1.4658, 'grad_norm': 0.002667893377921186, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 
83/520 [05:17<26:39, 3.66s/it] 16%|█▌ | 84/520 [05:21<26:39, 3.67s/it] {'loss': 1.4672, 'grad_norm': 0.0029081098398688137, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:21<26:39, 3.67s/it] 16%|█▋ | 85/520 [05:25<27:03, 3.73s/it] {'loss': 1.4907, 'grad_norm': 0.0024129963207500564, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:25<27:03, 3.73s/it] 17%|█▋ | 86/520 [05:28<27:15, 3.77s/it] {'loss': 1.5015, 'grad_norm': 0.0026433806796283014, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:28<27:15, 3.77s/it] 17%|█▋ | 87/520 [05:32<27:23, 3.80s/it] {'loss': 1.4975, 'grad_norm': 0.002783006731820975, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:32<27:23, 3.80s/it] 17%|█▋ | 88/520 [05:36<27:30, 3.82s/it] {'loss': 1.4621, 'grad_norm': 0.003201156597198366, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:36<27:30, 3.82s/it] 17%|█▋ | 89/520 [05:40<27:32, 3.83s/it] {'loss': 1.4493, 'grad_norm': 0.002650689621398623, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:40<27:32, 3.83s/it] 17%|█▋ | 90/520 [05:44<27:35, 3.85s/it] {'loss': 1.3823, 'grad_norm': 0.0024341159054140564, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:44<27:35, 3.85s/it] 18%|█▊ | 91/520 [05:48<27:29, 3.84s/it] {'loss': 1.457, 'grad_norm': 0.0022918144374339863, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:48<27:29, 3.84s/it] 18%|█▊ | 92/520 [05:52<27:28, 3.85s/it] {'loss': 1.3895, 'grad_norm': 0.0024236677560793003, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:52<27:28, 3.85s/it] 18%|█▊ | 93/520 [05:55<27:24, 3.85s/it] {'loss': 1.4091, 'grad_norm': 0.002717863921759231, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:55<27:24, 3.85s/it] 18%|█▊ | 94/520 [05:59<27:24, 3.86s/it] {'loss': 1.4939, 'grad_norm': 0.002837018436174238, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [05:59<27:24, 3.86s/it] 18%|█▊ | 95/520 [06:03<27:20, 3.86s/it] {'loss': 1.3752, 'grad_norm': 0.002965685513275199, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:03<27:20, 3.86s/it] 18%|█▊ | 96/520 [06:07<27:19, 3.87s/it] {'loss': 1.4006, 'grad_norm': 0.0022003626251208114, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:07<27:19, 3.87s/it] 19%|█▊ | 97/520 [06:11<27:25, 3.89s/it] {'loss': 1.3584, 'grad_norm': 0.0029564910663042827, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:11<27:25, 3.89s/it] 19%|█▉ | 98/520 [06:15<27:20, 3.89s/it] {'loss': 1.3593, 'grad_norm': 0.0021779557853213887, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:15<27:20, 3.89s/it] 19%|█▉ | 99/520 [06:19<27:10, 3.87s/it] {'loss': 1.3788, 'grad_norm': 0.0025229569122321677, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:19<27:10, 3.87s/it] 19%|█▉ | 100/520 [06:22<26:41, 3.81s/it] {'loss': 1.4736, 'grad_norm': 0.00392883494631525, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:22<26:41, 3.81s/it] 19%|█▉ | 101/520 [06:26<26:17, 3.76s/it] {'loss': 1.3789, 'grad_norm': 0.002741295008206236, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:26<26:17, 3.76s/it] 20%|█▉ | 102/520 [06:30<26:02, 3.74s/it] {'loss': 1.3867, 'grad_norm': 0.0026177825656032304, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 
20%|█▉ | 102/520 [06:30<26:02, 3.74s/it] 20%|█▉ | 103/520 [06:33<25:53, 3.72s/it] {'loss': 1.3074, 'grad_norm': 0.001999427890969773, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:33<25:53, 3.72s/it] 20%|██ | 104/520 [06:37<25:48, 3.72s/it] {'loss': 1.386, 'grad_norm': 0.0023163598005425915, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:37<25:48, 3.72s/it] 20%|██ | 105/520 [06:41<25:40, 3.71s/it] {'loss': 1.3856, 'grad_norm': 0.0022961803674992156, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:41<25:40, 3.71s/it] 20%|██ | 106/520 [06:45<25:33, 3.70s/it] {'loss': 1.4702, 'grad_norm': 0.0029919067771560785, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:45<25:33, 3.70s/it] 21%|██ | 107/520 [06:48<25:23, 3.69s/it] {'loss': 1.4473, 'grad_norm': 0.0029555048850722168, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:48<25:23, 3.69s/it] 21%|██ | 108/520 [06:52<25:22, 3.70s/it] {'loss': 1.345, 'grad_norm': 0.002581090139025784, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:52<25:22, 3.70s/it] 21%|██ | 109/520 [06:56<25:14, 3.68s/it] {'loss': 1.4452, 'grad_norm': 0.0022135417569099525, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:56<25:14, 3.68s/it] 21%|██ | 110/520 [06:59<25:06, 3.67s/it] {'loss': 1.5354, 'grad_norm': 0.002232341924742974, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [06:59<25:06, 3.67s/it] 21%|██▏ | 111/520 [07:03<24:59, 3.67s/it] {'loss': 1.5364, 'grad_norm': 0.0025435349947964363, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:03<24:59, 3.67s/it] 22%|██▏ | 112/520 [07:07<24:54, 3.66s/it] {'loss': 1.4139, 'grad_norm': 0.0021602396249646963, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:07<24:54, 3.66s/it] 22%|██▏ | 113/520 [07:10<24:52, 3.67s/it] {'loss': 1.2895, 'grad_norm': 0.0020438454690316957, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:10<24:52, 3.67s/it] 22%|██▏ | 114/520 [07:14<24:46, 3.66s/it] {'loss': 1.3883, 'grad_norm': 0.0022672981669240207, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:14<24:46, 3.66s/it] 22%|██▏ | 115/520 [07:18<24:45, 3.67s/it] {'loss': 1.5118, 'grad_norm': 0.0021800436941815694, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:18<24:45, 3.67s/it] 22%|██▏ | 116/520 [07:21<24:56, 3.70s/it] {'loss': 1.5063, 'grad_norm': 0.001958585635038842, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:21<24:56, 3.70s/it] 22%|██▎ | 117/520 [07:25<24:44, 3.68s/it] {'loss': 1.4833, 'grad_norm': 0.0025922212099791964, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:25<24:44, 3.68s/it] 23%|██▎ | 118/520 [07:29<24:37, 3.67s/it] {'loss': 1.3633, 'grad_norm': 0.0019067914970774418, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:29<24:37, 3.67s/it] 23%|██▎ | 119/520 [07:32<24:31, 3.67s/it] {'loss': 1.3204, 'grad_norm': 0.002210756759966818, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:32<24:31, 3.67s/it] 23%|██▎ | 120/520 [07:36<24:27, 3.67s/it] {'loss': 1.3399, 'grad_norm': 0.0024441051302525317, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:36<24:27, 3.67s/it] 23%|██▎ | 121/520 [07:40<24:20, 3.66s/it] {'loss': 1.4027, 'grad_norm': 
0.0026702946419293197, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:40<24:20, 3.66s/it] 23%|██▎ | 122/520 [07:43<24:16, 3.66s/it] {'loss': 1.294, 'grad_norm': 0.0021227072041655262, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:43<24:16, 3.66s/it] 24%|██▎ | 123/520 [07:47<24:16, 3.67s/it] {'loss': 1.518, 'grad_norm': 0.01099242489750551, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:47<24:16, 3.67s/it] 24%|██▍ | 124/520 [07:51<24:23, 3.70s/it] {'loss': 1.3745, 'grad_norm': 0.002570384105282688, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:51<24:23, 3.70s/it] 24%|██▍ | 125/520 [07:55<24:43, 3.76s/it] {'loss': 1.3649, 'grad_norm': 0.0022559646803342478, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:55<24:43, 3.76s/it] 24%|██▍ | 126/520 [07:59<26:12, 3.99s/it] {'loss': 1.4242, 'grad_norm': 0.0021514019497935253, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:59<26:12, 3.99s/it] 24%|██▍ | 127/520 [08:03<25:52, 3.95s/it] {'loss': 1.3461, 'grad_norm': 0.0029204119611455845, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:03<25:52, 3.95s/it] 25%|██▍ | 128/520 [08:07<25:36, 3.92s/it] {'loss': 1.42, 'grad_norm': 0.01130354756304454, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:07<25:36, 3.92s/it] 25%|██▍ | 129/520 [08:10<24:59, 3.84s/it] {'loss': 1.3233, 'grad_norm': 0.0018837725987765582, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:10<24:59, 3.84s/it] 25%|██▌ | 130/520 [08:14<24:36, 3.79s/it] {'loss': 1.3868, 'grad_norm': 0.0021762108331150917, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:14<24:36, 3.79s/it] 25%|██▌ | 131/520 [08:18<24:17, 3.75s/it] {'loss': 1.3784, 'grad_norm': 0.0026453439304667055, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:18<24:17, 3.75s/it] 25%|██▌ | 132/520 [08:22<24:20, 3.76s/it] {'loss': 1.4302, 'grad_norm': 0.002234163138236021, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:22<24:20, 3.76s/it] 26%|██▌ | 133/520 [08:25<24:24, 3.78s/it] {'loss': 1.3336, 'grad_norm': 0.002178573841758172, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:25<24:24, 3.78s/it] 26%|██▌ | 134/520 [08:29<24:30, 3.81s/it] {'loss': 1.4187, 'grad_norm': 0.002383369795406837, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:29<24:30, 3.81s/it] 26%|██▌ | 135/520 [08:33<24:27, 3.81s/it] {'loss': 1.4953, 'grad_norm': 0.0022816283836005627, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:33<24:27, 3.81s/it] 26%|██▌ | 136/520 [08:37<24:26, 3.82s/it] {'loss': 1.4059, 'grad_norm': 0.002278872820064295, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:37<24:26, 3.82s/it] 26%|██▋ | 137/520 [08:41<24:25, 3.83s/it] {'loss': 1.3305, 'grad_norm': 0.0025536620229928315, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<24:25, 3.83s/it] 27%|██▋ | 138/520 [08:44<24:02, 3.78s/it] {'loss': 1.3425, 'grad_norm': 0.0021553638415712185, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:44<24:02, 3.78s/it] 27%|██▋ | 139/520 [08:48<23:41, 3.73s/it] {'loss': 1.2828, 'grad_norm': 0.0022912980666559865, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:48<23:41, 3.73s/it] 
27%|██▋ | 140/520 [08:52<23:32, 3.72s/it] {'loss': 1.4367, 'grad_norm': 0.0026727329448286933, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:52<23:32, 3.72s/it] 27%|██▋ | 141/520 [08:55<23:19, 3.69s/it] {'loss': 1.4507, 'grad_norm': 0.0021725421057655872, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:55<23:19, 3.69s/it] 27%|██▋ | 142/520 [08:59<23:11, 3.68s/it] {'loss': 1.4708, 'grad_norm': 0.002220464720316986, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:59<23:11, 3.68s/it] 28%|██▊ | 143/520 [09:03<23:10, 3.69s/it] {'loss': 1.3827, 'grad_norm': 0.002302438550568782, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:03<23:10, 3.69s/it] 28%|██▊ | 144/520 [09:06<23:04, 3.68s/it] {'loss': 1.322, 'grad_norm': 0.002175140906883818, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:06<23:04, 3.68s/it] 28%|██▊ | 145/520 [09:10<23:03, 3.69s/it] {'loss': 1.2636, 'grad_norm': 0.0018987756492918578, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:10<23:03, 3.69s/it] 28%|██▊ | 146/520 [09:14<23:00, 3.69s/it] {'loss': 1.5021, 'grad_norm': 0.0021189507793586445, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:14<23:00, 3.69s/it] 28%|██▊ | 147/520 [09:17<22:54, 3.69s/it] {'loss': 1.2972, 'grad_norm': 0.0020449372642768796, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:17<22:54, 3.69s/it] 28%|██▊ | 148/520 [09:21<23:05, 3.72s/it] {'loss': 1.3408, 'grad_norm': 0.002006911683886497, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:21<23:05, 3.72s/it] 29%|██▊ | 149/520 [09:25<22:58, 3.72s/it] {'loss': 1.2854, 'grad_norm': 0.002172586126186971, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:25<22:58, 3.72s/it] 29%|██▉ | 150/520 [09:29<22:47, 3.69s/it] {'loss': 1.5219, 'grad_norm': 0.0022664449329225685, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:29<22:47, 3.69s/it] 29%|██▉ | 151/520 [09:32<22:38, 3.68s/it] {'loss': 1.3266, 'grad_norm': 0.0020449734157795708, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:32<22:38, 3.68s/it] 29%|██▉ | 152/520 [09:36<22:30, 3.67s/it] {'loss': 1.2932, 'grad_norm': 0.002083151216649477, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:36<22:30, 3.67s/it] 29%|██▉ | 153/520 [09:40<22:27, 3.67s/it] {'loss': 1.3309, 'grad_norm': 0.0021882247950676162, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:40<22:27, 3.67s/it] 30%|██▉ | 154/520 [09:43<22:26, 3.68s/it] {'loss': 1.4226, 'grad_norm': 0.0020397993982194293, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:43<22:26, 3.68s/it] 30%|██▉ | 155/520 [09:47<22:19, 3.67s/it] {'loss': 1.3252, 'grad_norm': 0.0020714866945855414, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:47<22:19, 3.67s/it] 30%|███ | 156/520 [09:51<22:16, 3.67s/it] {'loss': 1.356, 'grad_norm': 0.0021787655910149822, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:51<22:16, 3.67s/it] 30%|███ | 157/520 [09:54<22:11, 3.67s/it] {'loss': 1.5059, 'grad_norm': 0.003143499121369811, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:54<22:11, 3.67s/it] 30%|███ | 158/520 [09:58<22:13, 3.68s/it] {'loss': 1.3349, 'grad_norm': 0.002390082104938572, 'learning_rate': 
0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:58<22:13, 3.68s/it] 31%|███ | 159/520 [10:02<22:05, 3.67s/it] {'loss': 1.36, 'grad_norm': 0.0019617906995362516, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:02<22:05, 3.67s/it] 31%|███ | 160/520 [10:05<22:03, 3.68s/it] {'loss': 1.3878, 'grad_norm': 0.002082132238910029, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:05<22:03, 3.68s/it] 31%|███ | 161/520 [10:09<21:54, 3.66s/it] {'loss': 1.369, 'grad_norm': 0.0020552419026430365, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:09<21:54, 3.66s/it] 31%|███ | 162/520 [10:13<21:53, 3.67s/it] {'loss': 1.437, 'grad_norm': 0.0028246321389691466, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:13<21:53, 3.67s/it] 31%|███▏ | 163/520 [10:16<21:46, 3.66s/it] {'loss': 1.2468, 'grad_norm': 0.002540646543107811, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:16<21:46, 3.66s/it] 32%|███▏ | 164/520 [10:20<21:42, 3.66s/it] {'loss': 1.212, 'grad_norm': 0.0020870811304053542, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:20<21:42, 3.66s/it] 32%|███▏ | 165/520 [10:24<21:40, 3.66s/it] {'loss': 1.3597, 'grad_norm': 0.001833116006669458, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:24<21:40, 3.66s/it] 32%|███▏ | 166/520 [10:28<22:01, 3.73s/it] {'loss': 1.3529, 'grad_norm': 0.002164479291956821, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:28<22:01, 3.73s/it] 32%|███▏ | 167/520 [10:31<22:16, 3.78s/it] {'loss': 1.3481, 'grad_norm': 0.002173886458131699, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:31<22:16, 3.78s/it] 32%|███▏ | 168/520 [10:35<22:24, 3.82s/it] {'loss': 1.2659, 'grad_norm': 0.002028160110290444, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:35<22:24, 3.82s/it] 32%|███▎ | 169/520 [10:39<22:25, 3.83s/it] {'loss': 1.3602, 'grad_norm': 0.0019551265695328703, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:39<22:25, 3.83s/it] 33%|███▎ | 170/520 [10:43<22:26, 3.85s/it] {'loss': 1.3513, 'grad_norm': 0.0019881218097369236, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:43<22:26, 3.85s/it] 33%|███▎ | 171/520 [10:47<22:25, 3.85s/it] {'loss': 1.2885, 'grad_norm': 0.0021184038673805034, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:47<22:25, 3.85s/it] 33%|███▎ | 172/520 [10:51<22:22, 3.86s/it] {'loss': 1.3586, 'grad_norm': 0.0020627568599592425, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:51<22:22, 3.86s/it] 33%|███▎ | 173/520 [10:55<22:22, 3.87s/it] {'loss': 1.2894, 'grad_norm': 0.0019292451804717719, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:55<22:22, 3.87s/it] 33%|███▎ | 174/520 [10:59<22:23, 3.88s/it] {'loss': 1.3599, 'grad_norm': 0.002226644389508938, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [10:59<22:23, 3.88s/it] 34%|███▎ | 175/520 [11:03<22:27, 3.91s/it] {'loss': 1.2642, 'grad_norm': 0.001972743148985222, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:03<22:27, 3.91s/it] 34%|███▍ | 176/520 [11:06<22:23, 3.91s/it] {'loss': 1.4455, 'grad_norm': 0.0020916329619356847, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:06<22:23, 3.91s/it] 34%|███▍ | 
177/520 [11:10<22:17, 3.90s/it] {'loss': 1.3062, 'grad_norm': 0.002170192236744153, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:10<22:17, 3.90s/it] 34%|███▍ | 178/520 [11:14<22:14, 3.90s/it] {'loss': 1.337, 'grad_norm': 0.002076299641803403, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:14<22:14, 3.90s/it] 34%|███▍ | 179/520 [11:18<22:07, 3.89s/it] {'loss': 1.424, 'grad_norm': 0.0018925374992674942, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:18<22:07, 3.89s/it] 35%|███▍ | 180/520 [11:22<21:48, 3.85s/it] {'loss': 1.3383, 'grad_norm': 0.0020846601453478578, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:22<21:48, 3.85s/it] 35%|███▍ | 181/520 [11:26<21:29, 3.80s/it] {'loss': 1.3029, 'grad_norm': 0.0018318477637558762, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:26<21:29, 3.80s/it] 35%|███▌ | 182/520 [11:29<21:14, 3.77s/it] {'loss': 1.3096, 'grad_norm': 0.0020194527393682157, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:29<21:14, 3.77s/it] 35%|███▌ | 183/520 [11:33<21:19, 3.80s/it] {'loss': 1.352, 'grad_norm': 0.0020336421606418006, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:33<21:19, 3.80s/it] 35%|███▌ | 184/520 [11:37<21:10, 3.78s/it] {'loss': 1.2597, 'grad_norm': 0.0020077241198259814, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:37<21:10, 3.78s/it] 36%|███▌ | 185/520 [11:41<21:01, 3.77s/it] {'loss': 1.4327, 'grad_norm': 0.002010322702844064, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:41<21:01, 3.77s/it] 36%|███▌ | 186/520 [11:44<20:49, 3.74s/it] {'loss': 1.2833, 'grad_norm': 0.0019301224418169396, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:44<20:49, 3.74s/it] 36%|███▌ | 187/520 [11:48<20:42, 3.73s/it] {'loss': 1.3011, 'grad_norm': 0.0022870518155233884, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:48<20:42, 3.73s/it] 36%|███▌ | 188/520 [11:52<20:35, 3.72s/it] {'loss': 1.3717, 'grad_norm': 0.002116689573891423, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:52<20:35, 3.72s/it] 36%|███▋ | 189/520 [11:55<20:30, 3.72s/it] {'loss': 1.3824, 'grad_norm': 0.0017870271201307379, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:55<20:30, 3.72s/it] 37%|███▋ | 190/520 [11:59<20:21, 3.70s/it] {'loss': 1.2986, 'grad_norm': 0.0021076267459302447, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:59<20:21, 3.70s/it] 37%|███▋ | 191/520 [12:03<20:54, 3.81s/it] {'loss': 1.2499, 'grad_norm': 0.0017721197412291676, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:03<20:54, 3.81s/it] 37%|███▋ | 192/520 [12:07<21:16, 3.89s/it] {'loss': 1.3403, 'grad_norm': 0.0017824778304023152, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:07<21:16, 3.89s/it] 37%|███▋ | 193/520 [12:11<21:12, 3.89s/it] {'loss': 1.3932, 'grad_norm': 0.0030397245265361803, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:11<21:12, 3.89s/it] 37%|███▋ | 194/520 [12:15<21:06, 3.89s/it] {'loss': 1.2458, 'grad_norm': 0.0019625232689332263, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:15<21:06, 3.89s/it] 38%|███▊ | 195/520 [12:19<20:59, 3.87s/it] {'loss': 1.3568, 'grad_norm': 
0.001924912511123542, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:19<20:59, 3.87s/it] 38%|███▊ | 196/520 [12:23<20:55, 3.87s/it] {'loss': 1.3255, 'grad_norm': 0.0021667906308159538, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:23<20:55, 3.87s/it] 38%|███▊ | 197/520 [12:27<20:50, 3.87s/it] {'loss': 1.277, 'grad_norm': 0.0018935444703060223, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:27<20:50, 3.87s/it] 38%|███▊ | 198/520 [12:30<20:44, 3.87s/it] {'loss': 1.3525, 'grad_norm': 0.00196592343673127, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:30<20:44, 3.87s/it] 38%|███▊ | 199/520 [12:34<20:42, 3.87s/it] {'loss': 1.2704, 'grad_norm': 0.0019949426054931803, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:34<20:42, 3.87s/it] 38%|███▊ | 200/520 [12:38<20:41, 3.88s/it] {'loss': 1.2898, 'grad_norm': 0.00202208635270333, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:38<20:41, 3.88s/it] 39%|███▊ | 201/520 [12:42<20:38, 3.88s/it] {'loss': 1.3183, 'grad_norm': 0.0019407538657155684, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:42<20:38, 3.88s/it] 39%|███▉ | 202/520 [12:46<20:35, 3.88s/it] {'loss': 1.2672, 'grad_norm': 0.001854817555970354, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:46<20:35, 3.88s/it] 39%|███▉ | 203/520 [12:50<20:26, 3.87s/it] {'loss': 1.3196, 'grad_norm': 0.0020415975983161393, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:50<20:26, 3.87s/it] 39%|███▉ | 204/520 [12:53<20:03, 3.81s/it] {'loss': 1.3431, 'grad_norm': 0.0020382897871836538, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:53<20:03, 3.81s/it] 39%|███▉ | 205/520 [12:57<19:47, 3.77s/it] {'loss': 1.3221, 'grad_norm': 0.0021174252687392783, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:57<19:47, 3.77s/it] 40%|███▉ | 206/520 [13:01<19:36, 3.75s/it] {'loss': 1.3867, 'grad_norm': 0.0019493065698673942, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:01<19:36, 3.75s/it] 40%|███▉ | 207/520 [13:05<19:27, 3.73s/it] {'loss': 1.3032, 'grad_norm': 0.0019555393677294737, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:05<19:27, 3.73s/it] 40%|████ | 208/520 [13:08<19:17, 3.71s/it] {'loss': 1.3579, 'grad_norm': 0.0021942780825240423, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:08<19:17, 3.71s/it] 40%|████ | 209/520 [13:12<19:19, 3.73s/it] {'loss': 1.2747, 'grad_norm': 0.001968724371615986, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:12<19:19, 3.73s/it] 40%|████ | 210/520 [13:16<19:11, 3.71s/it] {'loss': 1.3607, 'grad_norm': 0.002124548387444516, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:16<19:11, 3.71s/it] 41%|████ | 211/520 [13:19<19:05, 3.71s/it] {'loss': 1.3605, 'grad_norm': 0.001811570759324109, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:19<19:05, 3.71s/it] 41%|████ | 212/520 [13:23<19:00, 3.70s/it] {'loss': 1.3377, 'grad_norm': 0.002007761615095212, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:23<19:00, 3.70s/it] 41%|████ | 213/520 [13:27<18:58, 3.71s/it] {'loss': 1.2967, 'grad_norm': 0.0023782017771847015, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 
41%|████ | 213/520 [13:27<18:58, 3.71s/it] 41%|████ | 214/520 [13:30<18:49, 3.69s/it] {'loss': 1.2871, 'grad_norm': 0.002044665683479174, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:30<18:49, 3.69s/it] 41%|████▏ | 215/520 [13:34<18:44, 3.69s/it] {'loss': 1.2387, 'grad_norm': 0.0019970543792148174, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:34<18:44, 3.69s/it] 42%|████▏ | 216/520 [13:38<18:38, 3.68s/it] {'loss': 1.2078, 'grad_norm': 0.0018097422252602335, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:38<18:38, 3.68s/it] 42%|████▏ | 217/520 [13:42<18:44, 3.71s/it] {'loss': 1.3299, 'grad_norm': 0.0019365842577466989, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:42<18:44, 3.71s/it] 42%|████▏ | 218/520 [13:45<18:40, 3.71s/it] {'loss': 1.3239, 'grad_norm': 0.0019770892511836798, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:45<18:40, 3.71s/it] 42%|████▏ | 219/520 [13:49<18:48, 3.75s/it] {'loss': 1.3004, 'grad_norm': 0.0017115239537278369, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:49<18:48, 3.75s/it] 42%|████▏ | 220/520 [13:53<18:53, 3.78s/it] {'loss': 1.3145, 'grad_norm': 0.002242868024498637, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:53<18:53, 3.78s/it] 42%|████▎ | 221/520 [13:57<18:50, 3.78s/it] {'loss': 1.3291, 'grad_norm': 0.002062274192443457, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:57<18:50, 3.78s/it] 43%|████▎ | 222/520 [14:00<18:35, 3.74s/it] {'loss': 1.2416, 'grad_norm': 0.0019821879476358225, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:00<18:35, 3.74s/it] 43%|████▎ | 223/520 [14:04<18:33, 3.75s/it] {'loss': 1.2332, 'grad_norm': 0.0017885876368598837, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:04<18:33, 3.75s/it] 43%|████▎ | 224/520 [14:08<18:23, 3.73s/it] {'loss': 1.4249, 'grad_norm': 0.0026879675448863833, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:08<18:23, 3.73s/it] 43%|████▎ | 225/520 [14:12<18:16, 3.72s/it] {'loss': 1.2577, 'grad_norm': 0.0019819195452595016, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:12<18:16, 3.72s/it] 43%|████▎ | 226/520 [14:15<18:07, 3.70s/it] {'loss': 1.3548, 'grad_norm': 0.0018114893240739826, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:15<18:07, 3.70s/it] 44%|████▎ | 227/520 [14:19<18:01, 3.69s/it] {'loss': 1.3367, 'grad_norm': 0.0017891431602696701, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:19<18:01, 3.69s/it] 44%|████▍ | 228/520 [14:23<17:58, 3.69s/it] {'loss': 1.4347, 'grad_norm': 0.002042131355005374, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:23<17:58, 3.69s/it] 44%|████▍ | 229/520 [14:26<17:52, 3.68s/it] {'loss': 1.3084, 'grad_norm': 0.001700904982707256, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:26<17:52, 3.68s/it] 44%|████▍ | 230/520 [14:30<17:47, 3.68s/it] {'loss': 1.1943, 'grad_norm': 0.0019798809418815027, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:30<17:47, 3.68s/it] 44%|████▍ | 231/520 [14:34<17:43, 3.68s/it] {'loss': 1.2532, 'grad_norm': 0.0017034791809830677, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:34<17:43, 
3.68s/it] 45%|████▍ | 232/520 [14:37<17:39, 3.68s/it] {'loss': 1.4729, 'grad_norm': 0.0026204869306797555, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:37<17:39, 3.68s/it] 45%|████▍ | 233/520 [14:41<17:43, 3.70s/it] {'loss': 1.3349, 'grad_norm': 0.0020802131499292865, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:41<17:43, 3.70s/it] 45%|████▌ | 234/520 [14:45<17:33, 3.68s/it] {'loss': 1.2051, 'grad_norm': 0.0020398917445271377, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:45<17:33, 3.68s/it] 45%|████▌ | 235/520 [14:48<17:32, 3.69s/it] {'loss': 1.2631, 'grad_norm': 0.002075270596536038, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:48<17:32, 3.69s/it] 45%|████▌ | 236/520 [14:52<17:47, 3.76s/it] {'loss': 1.3706, 'grad_norm': 0.0018984799986342287, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:52<17:47, 3.76s/it] 46%|████▌ | 237/520 [14:56<17:33, 3.72s/it] {'loss': 1.3391, 'grad_norm': 0.0019950561491145374, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:56<17:33, 3.72s/it] 46%|████▌ | 238/520 [15:00<17:32, 3.73s/it] {'loss': 1.2694, 'grad_norm': 0.001832845006276492, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:00<17:32, 3.73s/it] 46%|████▌ | 239/520 [15:03<17:28, 3.73s/it] {'loss': 1.3652, 'grad_norm': 0.0019356056305728363, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:03<17:28, 3.73s/it] 46%|████▌ | 240/520 [15:07<17:19, 3.71s/it] {'loss': 1.1444, 'grad_norm': 0.001986801597050041, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:07<17:19, 3.71s/it] 46%|████▋ | 241/520 [15:11<17:09, 3.69s/it] {'loss': 1.2317, 'grad_norm': 0.0017367332779446953, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:11<17:09, 3.69s/it] 47%|████▋ | 242/520 [15:14<17:02, 3.68s/it] {'loss': 1.255, 'grad_norm': 0.001782361005921778, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:14<17:02, 3.68s/it] 47%|████▋ | 243/520 [15:18<16:59, 3.68s/it] {'loss': 1.2431, 'grad_norm': 0.001887926247667154, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:18<16:59, 3.68s/it] 47%|████▋ | 244/520 [15:22<16:52, 3.67s/it] {'loss': 1.3709, 'grad_norm': 0.0020043540156241104, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:22<16:52, 3.67s/it] 47%|████▋ | 245/520 [15:25<16:43, 3.65s/it] {'loss': 1.2265, 'grad_norm': 0.0018474367364264008, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:25<16:43, 3.65s/it] 47%|████▋ | 246/520 [15:29<16:38, 3.64s/it] {'loss': 1.4203, 'grad_norm': 0.0019847989053645705, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:29<16:38, 3.64s/it] 48%|████▊ | 247/520 [15:33<16:34, 3.64s/it] {'loss': 1.4098, 'grad_norm': 0.001917041350317627, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:33<16:34, 3.64s/it] 48%|████▊ | 248/520 [15:36<16:28, 3.63s/it] {'loss': 1.2373, 'grad_norm': 0.002031993050399593, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:36<16:28, 3.63s/it] 48%|████▊ | 249/520 [15:40<16:29, 3.65s/it] {'loss': 1.3307, 'grad_norm': 0.0018373684686496883, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:40<16:29, 3.65s/it] 48%|████▊ | 250/520 
[15:44<16:28, 3.66s/it] {'loss': 1.2626, 'grad_norm': 0.0020380999477915087, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:44<16:28, 3.66s/it] 48%|████▊ | 251/520 [15:47<16:26, 3.67s/it] {'loss': 1.3365, 'grad_norm': 0.0017373833081173985, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:47<16:26, 3.67s/it] 48%|████▊ | 252/520 [15:51<16:24, 3.67s/it] {'loss': 1.3193, 'grad_norm': 0.001843186023374389, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:51<16:24, 3.67s/it] 49%|████▊ | 253/520 [15:55<16:27, 3.70s/it] {'loss': 1.3337, 'grad_norm': 0.0020830516220618645, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:55<16:27, 3.70s/it] 49%|████▉ | 254/520 [15:58<16:22, 3.70s/it] {'loss': 1.2577, 'grad_norm': 0.0018240368446772008, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:58<16:22, 3.70s/it] 49%|████▉ | 255/520 [16:02<16:19, 3.70s/it] {'loss': 1.2622, 'grad_norm': 0.002022581711459243, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:02<16:19, 3.70s/it] 49%|████▉ | 256/520 [16:06<16:14, 3.69s/it] {'loss': 1.3101, 'grad_norm': 0.001969894477577831, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:06<16:14, 3.69s/it] 49%|████▉ | 257/520 [16:09<16:15, 3.71s/it] {'loss': 1.3008, 'grad_norm': 0.0018750896086650649, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:09<16:15, 3.71s/it] 50%|████▉ | 258/520 [16:13<16:11, 3.71s/it] {'loss': 1.3104, 'grad_norm': 0.0016732286278647928, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:13<16:11, 3.71s/it] 50%|████▉ | 259/520 [16:17<16:05, 3.70s/it] {'loss': 1.3859, 'grad_norm': 0.002146324978856792, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:17<16:05, 3.70s/it] 50%|█████ | 260/520 [16:20<15:57, 3.68s/it] {'loss': 1.3834, 'grad_norm': 0.0020686063644514605, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:20<15:57, 3.68s/it] 50%|█████ | 261/520 [16:24<15:53, 3.68s/it] {'loss': 1.3214, 'grad_norm': 0.002049713922500538, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:24<15:53, 3.68s/it] 50%|█████ | 262/520 [16:28<15:51, 3.69s/it] {'loss': 1.2234, 'grad_norm': 0.0018761131092795476, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:28<15:51, 3.69s/it] 51%|█████ | 263/520 [16:32<15:48, 3.69s/it] {'loss': 1.3327, 'grad_norm': 0.0020275555944458848, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:32<15:48, 3.69s/it] 51%|█████ | 264/520 [16:35<15:44, 3.69s/it] {'loss': 1.3398, 'grad_norm': 0.001870032964042851, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:35<15:44, 3.69s/it] 51%|█████ | 265/520 [16:39<15:40, 3.69s/it] {'loss': 1.2382, 'grad_norm': 0.002034819469943561, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:39<15:40, 3.69s/it] 51%|█████ | 266/520 [16:43<15:36, 3.69s/it] {'loss': 1.097, 'grad_norm': 0.0016625115279263545, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:43<15:36, 3.69s/it] 51%|█████▏ | 267/520 [16:46<15:33, 3.69s/it] {'loss': 1.2395, 'grad_norm': 0.0018541577537325982, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:46<15:33, 3.69s/it] 52%|█████▏ | 268/520 [16:50<15:28, 3.68s/it] {'loss': 
1.4447, 'grad_norm': 0.00233354169750256, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:50<15:28, 3.68s/it] 52%|█████▏ | 269/520 [16:54<15:23, 3.68s/it] {'loss': 1.3468, 'grad_norm': 0.0018803124575082293, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:54<15:23, 3.68s/it] 52%|█████▏ | 270/520 [16:57<15:23, 3.69s/it] {'loss': 1.2466, 'grad_norm': 0.0017690274393655188, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:57<15:23, 3.69s/it] 52%|█████▏ | 271/520 [17:01<15:23, 3.71s/it] {'loss': 1.3386, 'grad_norm': 0.0019006796288003466, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:01<15:23, 3.71s/it] 52%|█████▏ | 272/520 [17:05<15:17, 3.70s/it] {'loss': 1.2631, 'grad_norm': 0.0019081005551189927, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:05<15:17, 3.70s/it] 52%|█████▎ | 273/520 [17:08<15:10, 3.69s/it] {'loss': 1.3989, 'grad_norm': 0.00204379120753758, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:08<15:10, 3.69s/it] 53%|█████▎ | 274/520 [17:12<15:05, 3.68s/it] {'loss': 1.293, 'grad_norm': 0.00206475290425934, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:12<15:05, 3.68s/it] 53%|█████▎ | 275/520 [17:16<14:59, 3.67s/it] {'loss': 1.2392, 'grad_norm': 0.002010575368667895, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:16<14:59, 3.67s/it] 53%|█████▎ | 276/520 [17:19<14:54, 3.66s/it] {'loss': 1.3192, 'grad_norm': 0.0022035961007159915, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:19<14:54, 3.66s/it] 53%|█████▎ | 277/520 [17:23<14:51, 3.67s/it] {'loss': 1.3833, 'grad_norm': 0.0018470706531543482, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:23<14:51, 3.67s/it] 53%|█████▎ | 278/520 [17:27<14:45, 3.66s/it] {'loss': 1.1917, 'grad_norm': 0.0017463701086020463, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:27<14:45, 3.66s/it] 54%|█████▎ | 279/520 [17:30<14:39, 3.65s/it] {'loss': 1.27, 'grad_norm': 0.002193375330313697, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:30<14:39, 3.65s/it] 54%|█████▍ | 280/520 [17:34<14:36, 3.65s/it] {'loss': 1.2442, 'grad_norm': 0.002243701084039769, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:34<14:36, 3.65s/it] 54%|█████▍ | 281/520 [17:38<14:31, 3.65s/it] {'loss': 1.3468, 'grad_norm': 0.0019816609753914834, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:38<14:31, 3.65s/it] 54%|█████▍ | 282/520 [17:41<14:30, 3.66s/it] {'loss': 1.2072, 'grad_norm': 0.0017114626375470438, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:41<14:30, 3.66s/it] 54%|█████▍ | 283/520 [17:45<14:26, 3.66s/it] {'loss': 1.3685, 'grad_norm': 0.002064462344791014, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:45<14:26, 3.66s/it] 55%|█████▍ | 284/520 [17:49<14:22, 3.66s/it] {'loss': 1.2541, 'grad_norm': 0.0020428421215791734, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:49<14:22, 3.66s/it] 55%|█████▍ | 285/520 [17:52<14:19, 3.66s/it] {'loss': 1.2306, 'grad_norm': 0.0018946634070589614, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:52<14:19, 3.66s/it] 55%|█████▌ | 286/520 [17:56<14:15, 3.66s/it] {'loss': 1.1027, 'grad_norm': 
0.0020107173701820226, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:56<14:15, 3.66s/it] 55%|█████▌ | 287/520 [18:00<14:14, 3.67s/it] {'loss': 1.345, 'grad_norm': 0.0019791652399223088, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:00<14:14, 3.67s/it] 55%|█████▌ | 288/520 [18:03<14:08, 3.66s/it] {'loss': 1.3871, 'grad_norm': 0.0018969296426998177, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:03<14:08, 3.66s/it] 56%|█████▌ | 289/520 [18:07<14:06, 3.66s/it] {'loss': 1.2434, 'grad_norm': 0.001711957049336324, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:07<14:06, 3.66s/it] 56%|█████▌ | 290/520 [18:11<13:58, 3.65s/it] {'loss': 1.165, 'grad_norm': 0.0017311077865144707, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:11<13:58, 3.65s/it] 56%|█████▌ | 291/520 [18:14<13:58, 3.66s/it] {'loss': 1.2237, 'grad_norm': 0.0019541079833854027, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:14<13:58, 3.66s/it] 56%|█████▌ | 292/520 [18:18<13:53, 3.66s/it] {'loss': 1.274, 'grad_norm': 0.0018472317804026361, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:18<13:53, 3.66s/it] 56%|█████▋ | 293/520 [18:22<14:05, 3.72s/it] {'loss': 1.2096, 'grad_norm': 0.0019215945725959064, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:22<14:05, 3.72s/it] 57%|█████▋ | 294/520 [18:26<14:05, 3.74s/it] {'loss': 1.2394, 'grad_norm': 0.002038965919537297, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:26<14:05, 3.74s/it] 57%|█████▋ | 295/520 [18:29<14:06, 3.76s/it] {'loss': 1.3154, 'grad_norm': 0.0018890268334391081, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:29<14:06, 3.76s/it] 57%|█████▋ | 296/520 [18:33<14:08, 3.79s/it] {'loss': 1.1855, 'grad_norm': 0.0019458661970966815, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:33<14:08, 3.79s/it] 57%|█████▋ | 297/520 [18:37<14:06, 3.80s/it] {'loss': 1.3105, 'grad_norm': 0.0019824805485196125, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:37<14:06, 3.80s/it] 57%|█████▋ | 298/520 [18:41<14:00, 3.79s/it] {'loss': 1.2827, 'grad_norm': 0.0016619098723183203, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:41<14:00, 3.79s/it] 57%|█████▊ | 299/520 [18:45<13:56, 3.79s/it] {'loss': 1.332, 'grad_norm': 0.001794526653649084, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:45<13:56, 3.79s/it] 58%|█████▊ | 300/520 [18:48<13:51, 3.78s/it] {'loss': 1.3348, 'grad_norm': 0.0018242602716706784, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:48<13:51, 3.78s/it] 58%|█████▊ | 301/520 [18:52<13:41, 3.75s/it] {'loss': 1.31, 'grad_norm': 0.0018228642732679404, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:52<13:41, 3.75s/it] 58%|█████▊ | 302/520 [18:56<13:30, 3.72s/it] {'loss': 1.3527, 'grad_norm': 0.0019802700350512368, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:56<13:30, 3.72s/it] 58%|█████▊ | 303/520 [18:59<13:23, 3.70s/it] {'loss': 1.2482, 'grad_norm': 0.002059672141024724, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [18:59<13:23, 3.70s/it] 58%|█████▊ | 304/520 [19:03<13:20, 3.71s/it] {'loss': 1.2401, 'grad_norm': 
0.0019886353455798316, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:03<13:20, 3.71s/it] 59%|█████▊ | 305/520 [19:07<13:13, 3.69s/it] {'loss': 1.3512, 'grad_norm': 0.002078511817195078, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:07<13:13, 3.69s/it] 59%|█████▉ | 306/520 [19:10<13:08, 3.68s/it] {'loss': 1.2901, 'grad_norm': 0.0018290551809598812, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:10<13:08, 3.68s/it] 59%|█████▉ | 307/520 [19:15<13:35, 3.83s/it] {'loss': 1.2223, 'grad_norm': 0.0017447486041320863, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:15<13:35, 3.83s/it] 59%|█████▉ | 308/520 [19:18<13:20, 3.78s/it] {'loss': 1.3429, 'grad_norm': 0.002005421013224939, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:18<13:20, 3.78s/it] 59%|█████▉ | 309/520 [19:22<13:10, 3.74s/it] {'loss': 1.2219, 'grad_norm': 0.0016860171364833738, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:22<13:10, 3.74s/it] 60%|█████▉ | 310/520 [19:26<12:59, 3.71s/it] {'loss': 1.2031, 'grad_norm': 0.001780858683274138, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:26<12:59, 3.71s/it] 60%|█████▉ | 311/520 [19:29<12:53, 3.70s/it] {'loss': 1.1765, 'grad_norm': 0.001805653983836877, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:29<12:53, 3.70s/it] 60%|██████ | 312/520 [19:33<12:45, 3.68s/it] {'loss': 1.1676, 'grad_norm': 0.0019263627769412846, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:33<12:45, 3.68s/it] 60%|██████ | 313/520 [19:36<12:40, 3.68s/it] {'loss': 1.1571, 'grad_norm': 0.001613950954976428, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:36<12:40, 3.68s/it] 60%|██████ | 314/520 [19:41<13:07, 3.82s/it] {'loss': 1.1916, 'grad_norm': 0.0016638551507284869, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:41<13:07, 3.82s/it] 61%|██████ | 315/520 [19:44<12:56, 3.79s/it] {'loss': 1.3055, 'grad_norm': 0.0021720887839922517, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:44<12:56, 3.79s/it] 61%|██████ | 316/520 [19:49<13:15, 3.90s/it] {'loss': 1.1638, 'grad_norm': 0.0021838519441203354, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:49<13:15, 3.90s/it] 61%|██████ | 317/520 [19:52<12:58, 3.84s/it] {'loss': 1.1897, 'grad_norm': 0.001616551293575509, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:52<12:58, 3.84s/it] 61%|██████ | 318/520 [19:56<12:44, 3.79s/it] {'loss': 1.3088, 'grad_norm': 0.0019850405682130617, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:56<12:44, 3.79s/it] 61%|██████▏ | 319/520 [20:00<12:58, 3.87s/it] {'loss': 1.1754, 'grad_norm': 0.0018246971951640582, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:00<12:58, 3.87s/it] 62%|██████▏ | 320/520 [20:04<12:56, 3.88s/it] {'loss': 1.1139, 'grad_norm': 0.0019097801002015357, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:04<12:56, 3.88s/it] 62%|██████▏ | 321/520 [20:08<12:45, 3.85s/it] {'loss': 1.3208, 'grad_norm': 0.0019592265259293835, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:08<12:45, 3.85s/it] 62%|██████▏ | 322/520 [20:11<12:31, 3.79s/it] {'loss': 1.1823, 
'grad_norm': 0.0018436900726176199, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:11<12:31, 3.79s/it] 62%|██████▏ | 323/520 [20:15<12:18, 3.75s/it] {'loss': 1.2547, 'grad_norm': 0.001966987155336665, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:15<12:18, 3.75s/it] 62%|██████▏ | 324/520 [20:19<12:09, 3.72s/it] {'loss': 1.2522, 'grad_norm': 0.0019281825692393753, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:19<12:09, 3.72s/it] 62%|██████▎ | 325/520 [20:22<12:04, 3.72s/it] {'loss': 1.2601, 'grad_norm': 0.0019281891678048927, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:22<12:04, 3.72s/it] 63%|██████▎ | 326/520 [20:26<11:56, 3.69s/it] {'loss': 1.2513, 'grad_norm': 0.001819737721032606, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:26<11:56, 3.69s/it] 63%|██████▎ | 327/520 [20:30<11:51, 3.69s/it] {'loss': 1.3175, 'grad_norm': 0.0020296586746981074, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:30<11:51, 3.69s/it] 63%|██████▎ | 328/520 [20:33<11:45, 3.67s/it] {'loss': 1.3086, 'grad_norm': 0.0019210794581622944, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:33<11:45, 3.67s/it] 63%|██████▎ | 329/520 [20:37<11:40, 3.67s/it] {'loss': 1.1678, 'grad_norm': 0.001630870184872911, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:37<11:40, 3.67s/it] 63%|██████▎ | 330/520 [20:41<11:35, 3.66s/it] {'loss': 1.2459, 'grad_norm': 0.0016676095451111728, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:41<11:35, 3.66s/it] 64%|██████▎ | 331/520 [20:44<11:32, 3.66s/it] {'loss': 1.2016, 'grad_norm': 0.0017345291235235463, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:44<11:32, 3.66s/it] 64%|██████▍ | 332/520 [20:48<11:28, 3.66s/it] {'loss': 1.3306, 'grad_norm': 0.0017118487353533444, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:48<11:28, 3.66s/it] 64%|██████▍ | 333/520 [20:52<11:27, 3.67s/it] {'loss': 1.3562, 'grad_norm': 0.001953386046224116, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:52<11:27, 3.67s/it] 64%|██████▍ | 334/520 [20:55<11:29, 3.71s/it] {'loss': 1.2531, 'grad_norm': 0.002009867700523358, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:55<11:29, 3.71s/it] 64%|██████▍ | 335/520 [20:59<11:22, 3.69s/it] {'loss': 1.2463, 'grad_norm': 0.0016635702832438184, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [20:59<11:22, 3.69s/it] 65%|██████▍ | 336/520 [21:03<11:20, 3.70s/it] {'loss': 1.1359, 'grad_norm': 0.0019918449403549194, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:03<11:20, 3.70s/it] 65%|██████▍ | 337/520 [21:06<11:14, 3.69s/it] {'loss': 1.1363, 'grad_norm': 0.0018098607239414595, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:06<11:14, 3.69s/it] 65%|██████▌ | 338/520 [21:10<11:11, 3.69s/it] {'loss': 1.2581, 'grad_norm': 0.0018005672587287607, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:10<11:11, 3.69s/it] 65%|██████▌ | 339/520 [21:14<11:10, 3.71s/it] {'loss': 1.2011, 'grad_norm': 0.001760807841509949, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:14<11:10, 3.71s/it] 65%|██████▌ | 340/520 
[21:18<11:07, 3.71s/it] {'loss': 1.1965, 'grad_norm': 0.0018042743597355261, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:18<11:07, 3.71s/it] 66%|██████▌ | 341/520 [21:21<10:59, 3.69s/it] {'loss': 1.2115, 'grad_norm': 0.0018609654932741188, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:21<10:59, 3.69s/it] 66%|██████▌ | 342/520 [21:25<10:54, 3.68s/it] {'loss': 1.2981, 'grad_norm': 0.002283227636184851, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:25<10:54, 3.68s/it] 66%|██████▌ | 343/520 [21:29<10:55, 3.70s/it] {'loss': 1.2522, 'grad_norm': 0.0017874883084867455, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:29<10:55, 3.70s/it] 66%|██████▌ | 344/520 [21:32<10:59, 3.75s/it] {'loss': 1.1655, 'grad_norm': 0.0017884437432287657, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:32<10:59, 3.75s/it] 66%|██████▋ | 345/520 [21:36<10:58, 3.76s/it] {'loss': 1.2856, 'grad_norm': 0.002112235705692318, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:36<10:58, 3.76s/it] 67%|██████▋ | 346/520 [21:40<11:00, 3.80s/it] {'loss': 1.2587, 'grad_norm': 0.0017299830338927645, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:40<11:00, 3.80s/it] 67%|██████▋ | 347/520 [21:44<10:58, 3.81s/it] {'loss': 1.1801, 'grad_norm': 0.0017301923300043838, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:44<10:58, 3.81s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:48<10:55, 3.81s/it] {'loss': 1.1399, 'grad_norm': 0.002078244321857179, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:48<10:55, 3.81s/it] 67%|██████▋ | 349/520 [21:52<10:51, 3.81s/it] {'loss': 1.1803, 'grad_norm': 0.001819076302669517, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:52<10:51, 3.81s/it] 67%|██████▋ | 350/520 [21:55<10:47, 3.81s/it] {'loss': 1.223, 'grad_norm': 0.0018474822017365646, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:55<10:47, 3.81s/it] 68%|██████▊ | 351/520 [21:59<10:46, 3.82s/it] {'loss': 1.1317, 'grad_norm': 0.001635472962910989, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [21:59<10:46, 3.82s/it] 68%|██████▊ | 352/520 [22:03<10:43, 3.83s/it] {'loss': 1.259, 'grad_norm': 0.0018090953966598264, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:03<10:43, 3.83s/it] 68%|██████▊ | 353/520 [22:07<10:35, 3.81s/it] {'loss': 1.2064, 'grad_norm': 0.0016349635485458152, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:07<10:35, 3.81s/it] 68%|██████▊ | 354/520 [22:11<10:25, 3.77s/it] {'loss': 1.3352, 'grad_norm': 0.0017183816004449682, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:11<10:25, 3.77s/it] 68%|██████▊ | 355/520 [22:14<10:16, 3.74s/it] {'loss': 1.1908, 'grad_norm': 0.001798620398606143, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:14<10:16, 3.74s/it] 68%|██████▊ | 356/520 [22:18<10:10, 3.72s/it] {'loss': 1.1878, 'grad_norm': 0.0018477199168563687, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:18<10:10, 
3.72s/it] 69%|██████▊ | 357/520 [22:22<10:12, 3.76s/it] {'loss': 1.215, 'grad_norm': 0.0016855145400003111, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:22<10:12, 3.76s/it] 69%|██████▉ | 358/520 [22:26<10:14, 3.79s/it] {'loss': 1.1509, 'grad_norm': 0.0017207631857037331, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:26<10:14, 3.79s/it] 69%|██████▉ | 359/520 [22:29<10:13, 3.81s/it] {'loss': 1.2652, 'grad_norm': 0.0018764231778032876, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:29<10:13, 3.81s/it] 69%|██████▉ | 360/520 [22:33<10:15, 3.85s/it] {'loss': 1.2769, 'grad_norm': 0.0018778996937584573, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:33<10:15, 3.85s/it] 69%|██████▉ | 361/520 [22:37<10:13, 3.86s/it] {'loss': 1.2777, 'grad_norm': 0.0016570901921947258, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:37<10:13, 3.86s/it] 70%|██████▉ | 362/520 [22:41<10:10, 3.86s/it] {'loss': 1.2109, 'grad_norm': 0.001935004899464571, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:41<10:10, 3.86s/it] 70%|██████▉ | 363/520 [22:45<10:03, 3.85s/it] {'loss': 1.2279, 'grad_norm': 0.0017224941698611633, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:45<10:03, 3.85s/it] 70%|███████ | 364/520 [22:49<09:53, 3.80s/it] {'loss': 1.3007, 'grad_norm': 0.0018366090049641755, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:49<09:53, 3.80s/it] 70%|███████ | 365/520 [22:52<09:44, 3.77s/it] {'loss': 1.2978, 'grad_norm': 0.0018618381727355056, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:52<09:44, 3.77s/it] 70%|███████ | 366/520 [22:56<09:38, 3.76s/it] {'loss': 1.2502, 'grad_norm': 0.001704666457247974, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [22:56<09:38, 3.76s/it] 71%|███████ | 367/520 [23:00<09:32, 3.74s/it] {'loss': 1.2473, 'grad_norm': 0.0017691309472415667, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:00<09:32, 3.74s/it] 71%|███████ | 368/520 [23:04<09:27, 3.74s/it] {'loss': 1.1, 'grad_norm': 0.0020169783477486263, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:04<09:27, 3.74s/it] 71%|███████ | 369/520 [23:07<09:21, 3.72s/it] {'loss': 1.2494, 'grad_norm': 0.0016465782686624223, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:07<09:21, 3.72s/it] 71%|███████ | 370/520 [23:11<09:17, 3.71s/it] {'loss': 1.159, 'grad_norm': 0.0016466753588309927, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:11<09:17, 3.71s/it] 71%|███████▏ | 371/520 [23:15<09:11, 3.70s/it] {'loss': 1.1582, 'grad_norm': 0.0018442639237441385, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:15<09:11, 3.70s/it] 72%|███████▏ | 372/520 [23:18<09:08, 3.70s/it] {'loss': 1.3398, 'grad_norm': 0.0016184167045702805, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:18<09:08, 3.70s/it] 72%|███████▏ | 373/520 [23:22<09:04, 3.70s/it] {'loss': 1.2166, 'grad_norm': 0.001866565537744226, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:22<09:04, 3.70s/it] 72%|███████▏ | 374/520 [23:26<09:03, 3.72s/it] {'loss': 1.2397, 'grad_norm': 0.0017138544993580834, 'learning_rate': 0.038630520520795276, 'epoch': 
0.72} + 72%|███████▏ | 374/520 [23:26<09:03, 3.72s/it] 72%|███████▏ | 375/520 [23:30<09:02, 3.74s/it] {'loss': 1.1556, 'grad_norm': 0.0018877387906157657, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:30<09:02, 3.74s/it] 72%|███████▏ | 376/520 [23:33<08:55, 3.72s/it] {'loss': 1.2765, 'grad_norm': 0.0016807149671783103, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:33<08:55, 3.72s/it] 72%|███████▎ | 377/520 [23:37<08:48, 3.70s/it] {'loss': 1.2109, 'grad_norm': 0.001789690612811793, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:37<08:48, 3.70s/it] 73%|███████▎ | 378/520 [23:40<08:43, 3.69s/it] {'loss': 1.2622, 'grad_norm': 0.0016959501472444875, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:41<08:43, 3.69s/it] 73%|███████▎ | 379/520 [23:44<08:39, 3.68s/it] {'loss': 1.2495, 'grad_norm': 0.0016537034459015076, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:44<08:39, 3.68s/it] 73%|███████▎ | 380/520 [23:48<08:35, 3.68s/it] {'loss': 1.3138, 'grad_norm': 0.0022217377353371355, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:48<08:35, 3.68s/it] 73%|███████▎ | 381/520 [23:52<08:33, 3.70s/it] {'loss': 1.244, 'grad_norm': 0.0017349340573131204, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:52<08:33, 3.70s/it] 73%|███████▎ | 382/520 [23:55<08:37, 3.75s/it] {'loss': 1.2669, 'grad_norm': 0.0017617940318522952, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:55<08:37, 3.75s/it] 74%|███████▎ | 383/520 [23:59<08:37, 3.78s/it] {'loss': 1.0813, 'grad_norm': 0.001829467828681564, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:59<08:37, 3.78s/it] 74%|███████▍ | 384/520 [24:03<08:36, 3.79s/it] {'loss': 1.336, 'grad_norm': 0.0016549794339755684, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:03<08:36, 3.79s/it] 74%|███████▍ | 385/520 [24:07<08:42, 3.87s/it] {'loss': 1.2222, 'grad_norm': 0.0016428396098067798, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:07<08:42, 3.87s/it] 74%|███████▍ | 386/520 [24:11<08:49, 3.95s/it] {'loss': 1.1703, 'grad_norm': 0.001505151130014924, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:11<08:49, 3.95s/it] 74%|███████▍ | 387/520 [24:16<08:54, 4.02s/it] {'loss': 1.3362, 'grad_norm': 0.0017606806783378218, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:16<08:54, 4.02s/it] 75%|███████▍ | 388/520 [24:20<08:53, 4.04s/it] {'loss': 1.1182, 'grad_norm': 0.001689001789541186, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:20<08:53, 4.04s/it] 75%|███████▍ | 389/520 [24:23<08:41, 3.98s/it] {'loss': 1.1759, 'grad_norm': 0.0022641376726381564, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:23<08:41, 3.98s/it] 75%|███████▌ | 390/520 [24:27<08:30, 3.93s/it] {'loss': 1.2427, 'grad_norm': 0.0016879082512593958, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:27<08:30, 3.93s/it] 75%|███████▌ | 391/520 [24:31<08:34, 3.99s/it] {'loss': 1.3253, 'grad_norm': 0.001835907655455613, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:31<08:34, 3.99s/it] 75%|███████▌ | 392/520 [24:36<08:36, 4.04s/it] {'loss': 1.1268, 
'grad_norm': 0.0016608930131525894, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:36<08:36, 4.04s/it] 76%|███████▌ | 393/520 [24:40<08:37, 4.07s/it] {'loss': 1.1542, 'grad_norm': 0.0015328183570992634, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:40<08:37, 4.07s/it] 76%|███████▌ | 394/520 [24:44<08:32, 4.07s/it] {'loss': 1.1942, 'grad_norm': 0.0019159334362267488, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:44<08:32, 4.07s/it] 76%|███████▌ | 395/520 [24:48<08:19, 3.99s/it] {'loss': 1.1599, 'grad_norm': 0.0019618949795048636, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:48<08:19, 3.99s/it] 76%|███████▌ | 396/520 [24:51<08:07, 3.93s/it] {'loss': 1.2409, 'grad_norm': 0.0018705845534838588, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:51<08:07, 3.93s/it] 76%|███████▋ | 397/520 [24:55<07:59, 3.90s/it] {'loss': 1.2278, 'grad_norm': 0.0016627265978662253, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:55<07:59, 3.90s/it] 77%|███████▋ | 398/520 [24:59<07:52, 3.88s/it] {'loss': 1.2264, 'grad_norm': 0.001786315940263252, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:59<07:52, 3.88s/it] 77%|███████▋ | 399/520 [25:03<07:48, 3.87s/it] {'loss': 1.2044, 'grad_norm': 0.0016971904439370942, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:03<07:48, 3.87s/it] 77%|███████▋ | 400/520 [25:07<07:42, 3.85s/it] {'loss': 1.2374, 'grad_norm': 0.0016412591454362708, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:07<07:42, 3.85s/it] 77%|███████▋ | 401/520 [25:10<07:32, 3.80s/it] {'loss': 1.0517, 'grad_norm': 0.001998912056969298, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:10<07:32, 3.80s/it] 77%|███████▋ | 402/520 [25:14<07:26, 3.78s/it] {'loss': 1.1721, 'grad_norm': 0.0018708737561761153, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:14<07:26, 3.78s/it] 78%|███████▊ | 403/520 [25:18<07:24, 3.80s/it] {'loss': 1.1993, 'grad_norm': 0.001967393594936681, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:18<07:24, 3.80s/it] 78%|███████▊ | 404/520 [25:22<07:24, 3.83s/it] {'loss': 1.0999, 'grad_norm': 0.0021466488393323854, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:22<07:24, 3.83s/it] 78%|███████▊ | 405/520 [25:26<07:23, 3.85s/it] {'loss': 1.208, 'grad_norm': 0.0017458705551823743, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:26<07:23, 3.85s/it] 78%|███████▊ | 406/520 [25:30<07:22, 3.88s/it] {'loss': 1.1349, 'grad_norm': 0.0020247254158974764, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:30<07:22, 3.88s/it] 78%|███████▊ | 407/520 [25:34<07:19, 3.89s/it] {'loss': 1.2917, 'grad_norm': 0.001764718465751009, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:34<07:19, 3.89s/it] 78%|███████▊ | 408/520 [25:37<07:09, 3.83s/it] {'loss': 1.1823, 'grad_norm': 0.0018592788882771515, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:37<07:09, 3.83s/it] 79%|███████▊ | 409/520 [25:41<07:01, 3.80s/it] {'loss': 1.3108, 'grad_norm': 0.001948383781083682, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 
[25:41<07:01, 3.80s/it] 79%|███████▉ | 410/520 [25:45<06:55, 3.77s/it] {'loss': 1.0318, 'grad_norm': 0.001740883243036254, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:45<06:55, 3.77s/it] 79%|███████▉ | 411/520 [25:48<06:48, 3.75s/it] {'loss': 1.2828, 'grad_norm': 0.00198065059794587, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:48<06:48, 3.75s/it] 79%|███████▉ | 412/520 [25:52<06:41, 3.72s/it] {'loss': 1.1937, 'grad_norm': 0.0017443778028226392, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:52<06:41, 3.72s/it] 79%|███████▉ | 413/520 [25:56<06:36, 3.71s/it] {'loss': 1.2324, 'grad_norm': 0.0017615300838455196, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:56<06:36, 3.71s/it] 80%|███████▉ | 414/520 [25:59<06:33, 3.71s/it] {'loss': 1.0329, 'grad_norm': 0.001475900933277607, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:59<06:33, 3.71s/it] 80%|███████▉ | 415/520 [26:03<06:34, 3.76s/it] {'loss': 1.1704, 'grad_norm': 0.0016929700527951055, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:03<06:34, 3.76s/it] 80%|████████ | 416/520 [26:07<06:34, 3.79s/it] {'loss': 1.0929, 'grad_norm': 0.0019169580534935764, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:07<06:34, 3.79s/it] 80%|████████ | 417/520 [26:11<06:34, 3.83s/it] {'loss': 1.2539, 'grad_norm': 0.0019255807933238338, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:11<06:34, 3.83s/it] 80%|████████ | 418/520 [26:15<06:31, 3.84s/it] {'loss': 1.2418, 'grad_norm': 0.001717306792967696, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:15<06:31, 3.84s/it] 81%|████████ | 419/520 [26:19<06:28, 3.84s/it] {'loss': 1.2294, 'grad_norm': 0.0018664904203607685, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:19<06:28, 3.84s/it] 81%|████████ | 420/520 [26:23<06:19, 3.79s/it] {'loss': 1.118, 'grad_norm': 0.0019455344506997781, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:23<06:19, 3.79s/it] 81%|████████ | 421/520 [26:26<06:11, 3.75s/it] {'loss': 1.0491, 'grad_norm': 0.00212775616235756, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:26<06:11, 3.75s/it] 81%|████████ | 422/520 [26:30<06:03, 3.71s/it] {'loss': 1.1679, 'grad_norm': 0.001795162751958178, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:30<06:03, 3.71s/it] 81%|████████▏ | 423/520 [26:34<06:04, 3.76s/it] {'loss': 1.1605, 'grad_norm': 0.001967167274699136, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:34<06:04, 3.76s/it] 82%|████████▏ | 424/520 [26:37<05:58, 3.74s/it] {'loss': 1.3145, 'grad_norm': 0.0018606482549505577, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:37<05:58, 3.74s/it] 82%|████████▏ | 425/520 [26:41<05:53, 3.72s/it] {'loss': 1.1697, 'grad_norm': 0.001712339863591348, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:41<05:53, 3.72s/it] 82%|████████▏ | 426/520 [26:45<05:47, 3.70s/it] {'loss': 1.1905, 'grad_norm': 0.0023220033480005485, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:45<05:47, 3.70s/it] 82%|████████▏ | 427/520 [26:48<05:43, 3.69s/it] {'loss': 1.1042, 'grad_norm': 
0.0016683908997586607, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 82%|████████▏ | 428/520 [26:52<05:38, 3.68s/it] {'loss': 1.0802, 'grad_norm': 0.001801188929310371, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 82%|████████▎ | 429/520 [26:56<05:35, 3.69s/it] {'loss': 1.1782, 'grad_norm': 0.0017247396248211374, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 83%|████████▎ | 430/520 [26:59<05:32, 3.69s/it] {'loss': 1.1757, 'grad_norm': 0.0016312624008005092, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 83%|████████▎ | 431/520 [27:03<05:28, 3.69s/it] {'loss': 1.1985, 'grad_norm': 0.001799990595192518, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 83%|████████▎ | 432/520 [27:07<05:25, 3.69s/it] {'loss': 1.088, 'grad_norm': 0.0018870097215196863, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 83%|████████▎ | 433/520 [27:10<05:20, 3.69s/it] {'loss': 1.2225, 'grad_norm': 0.0016849592372375636, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 83%|████████▎ | 434/520 [27:14<05:18, 3.71s/it] {'loss': 0.9647, 'grad_norm': 0.0017971972226316119, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 84%|████████▎ | 435/520 [27:18<05:17, 3.73s/it] {'loss': 1.2663, 'grad_norm': 0.0019679457495197167, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 84%|████████▍ | 436/520 [27:22<05:14, 3.75s/it] {'loss': 1.0496, 'grad_norm': 0.001768346873979825, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 84%|████████▍ | 437/520 [27:26<05:12, 3.76s/it] {'loss': 1.2824, 'grad_norm': 0.0017408512288308725, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 84%|████████▍ | 438/520 [27:29<05:09, 3.77s/it] {'loss': 1.0906, 'grad_norm': 0.001757522677179555, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 84%|████████▍ | 439/520 [27:33<05:05, 3.78s/it] {'loss': 1.1742, 'grad_norm': 0.001515942611306163, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 85%|████████▍ | 440/520 [27:37<05:04, 3.80s/it] {'loss': 1.1424, 'grad_norm': 0.001731896305786343, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 85%|████████▍ | 441/520 [27:41<05:01, 3.81s/it] {'loss': 1.2013, 'grad_norm': 0.001882482079025467, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 85%|████████▌ | 442/520 [27:45<04:57, 3.81s/it] {'loss': 1.1984, 'grad_norm': 0.0019180494940922055, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 85%|████████▌ | 443/520 [27:48<04:53, 3.81s/it] {'loss': 1.2107, 'grad_norm': 0.001765296275351436, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 85%|████████▌ | 444/520 [27:52<04:49, 3.80s/it] {'loss': 1.1791, 'grad_norm': 0.0016177859402696205, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 86%|████████▌ | 445/520 [27:56<04:45, 3.80s/it] {'loss': 1.1023, 'grad_norm': 0.0017408419897363358, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 86%|████████▌ | 446/520 [28:00<04:39, 3.78s/it] {'loss': 1.2727, 'grad_norm': 0.0016577197965964652, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 86%|████████▌ | 447/520 [28:04<04:34, 3.76s/it] {'loss': 1.1902, 'grad_norm': 0.0017355497740829744, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 86%|████████▌ | 448/520 [28:07<04:28, 3.73s/it] {'loss': 1.1718, 'grad_norm': 0.0017627058787641511, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 86%|████████▋ | 449/520 [28:11<04:23, 3.71s/it] {'loss': 1.2255, 'grad_norm': 0.0019513860897464456, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 87%|████████▋ | 450/520 [28:15<04:19, 3.71s/it] {'loss': 1.2085, 'grad_norm': 0.00182481702521829, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 87%|████████▋ | 451/520 [28:18<04:18, 3.74s/it] {'loss': 1.1972, 'grad_norm': 0.0017495315501774742, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 87%|████████▋ | 452/520 [28:22<04:15, 3.75s/it] {'loss': 1.268, 'grad_norm': 0.001647513771091212, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 87%|████████▋ | 453/520 [28:26<04:12, 3.76s/it] {'loss': 1.2416, 'grad_norm': 0.0017104854479968248, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 87%|████████▋ | 454/520 [28:30<04:08, 3.77s/it] {'loss': 1.1137, 'grad_norm': 0.001957419495187883, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 88%|████████▊ | 455/520 [28:34<04:05, 3.78s/it] {'loss': 1.2508, 'grad_norm': 0.001739358641367216, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 88%|████████▊ | 456/520 [28:37<04:02, 3.79s/it] {'loss': 1.1673, 'grad_norm': 0.0018005707016834295, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 88%|████████▊ | 457/520 [28:41<03:59, 3.80s/it] {'loss': 1.1762, 'grad_norm': 0.001583138232470233, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 88%|████████▊ | 458/520 [28:45<03:56, 3.82s/it] {'loss': 1.3097, 'grad_norm': 0.0018744662452583954, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 88%|████████▊ | 459/520 [28:49<03:53, 3.83s/it] {'loss': 1.2419, 'grad_norm': 0.0017919705147192092, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 88%|████████▊ | 460/520 [28:53<03:50, 3.83s/it] {'loss': 1.1175, 'grad_norm': 0.0017174421035306493, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 89%|████████▊ | 461/520 [28:56<03:44, 3.81s/it] {'loss': 1.2626, 'grad_norm': 0.0014327536558487097, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 89%|████████▉ | 462/520 [29:00<03:39, 3.78s/it] {'loss': 1.3176, 'grad_norm': 0.0017051346513362966, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 89%|████████▉ | 463/520 [29:04<03:33, 3.75s/it] {'loss': 1.0832, 'grad_norm': 0.0018923442926620394, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 89%|████████▉ | 464/520 [29:08<03:28, 3.73s/it] {'loss': 1.2238, 'grad_norm': 0.0018384472554677986, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 89%|████████▉ | 465/520 [29:11<03:24, 3.72s/it] {'loss': 1.332, 'grad_norm': 0.0019190275613438377, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 90%|████████▉ | 466/520 [29:15<03:20, 3.71s/it] {'loss': 1.2052, 'grad_norm': 0.001611357356901353, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 90%|████████▉ | 467/520 [29:19<03:15, 3.70s/it] {'loss': 1.2023, 'grad_norm': 0.0016195340665133688, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 90%|█████████ | 468/520 [29:22<03:12, 3.69s/it] {'loss': 1.187, 'grad_norm': 0.0020087591358493805, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 90%|█████████ | 469/520 [29:26<03:08, 3.69s/it] {'loss': 1.2417, 'grad_norm': 0.0019331168314998862, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 90%|█████████ | 470/520 [29:30<03:04, 3.69s/it] {'loss': 1.1179, 'grad_norm': 0.001562529546586759, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 91%|█████████ | 471/520 [29:33<03:00, 3.68s/it] {'loss': 1.1437, 'grad_norm': 0.0018078560030878644, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 91%|█████████ | 472/520 [29:37<02:56, 3.69s/it] {'loss': 1.1135, 'grad_norm': 0.001769038830552537, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 91%|█████████ | 473/520 [29:41<02:53, 3.69s/it] {'loss': 1.1721, 'grad_norm': 0.001781591959468375, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 91%|█████████ | 474/520 [29:44<02:49, 3.69s/it] {'loss': 1.2359, 'grad_norm': 0.0016726117584839285, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 91%|█████████▏| 475/520 [29:48<02:45, 3.68s/it] {'loss': 1.1567, 'grad_norm': 0.0016929753144215962, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 92%|█████████▏| 476/520 [29:52<02:41, 3.68s/it] {'loss': 1.1686, 'grad_norm': 0.001782319493005969, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 92%|█████████▏| 477/520 [29:55<02:37, 3.67s/it] {'loss': 1.1528, 'grad_norm': 0.001926333632861426, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 92%|█████████▏| 478/520 [29:59<02:33, 3.67s/it] {'loss': 1.114, 'grad_norm': 0.0018057780362503032, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 92%|█████████▏| 479/520 [30:03<02:30, 3.67s/it] {'loss': 1.2001, 'grad_norm': 0.0017963522162677652, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 92%|█████████▏| 480/520 [30:06<02:26, 3.66s/it] {'loss': 1.2256, 'grad_norm': 0.001641454286539415, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 92%|█████████▎| 481/520 [30:10<02:23, 3.68s/it] {'loss': 1.2249, 'grad_norm': 0.0016080278708384774, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 93%|█████████▎| 482/520 [30:14<02:19, 3.67s/it] {'loss': 1.2412, 'grad_norm': 0.0021218314518878854, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 93%|█████████▎| 483/520 [30:17<02:16, 3.69s/it] {'loss': 1.1785, 'grad_norm': 0.0018600584214373175, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 93%|█████████▎| 484/520 [30:21<02:13, 3.72s/it] {'loss': 1.1841, 'grad_norm': 0.0018368986637581934, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 93%|█████████▎| 485/520 [30:25<02:10, 3.73s/it] {'loss': 1.1317, 'grad_norm': 0.0016634703686010202, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 93%|█████████▎| 486/520 [30:29<02:06, 3.72s/it] {'loss': 1.2576, 'grad_norm': 0.001809654185025008, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 94%|█████████▎| 487/520 [30:32<02:02, 3.70s/it] {'loss': 1.1071, 'grad_norm': 0.0016445584482093626, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 94%|█████████▍| 488/520 [30:36<01:57, 3.68s/it] {'loss': 1.0504, 'grad_norm': 0.0017502034377247955, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 94%|█████████▍| 489/520 [30:40<01:53, 3.67s/it] {'loss': 1.2307, 'grad_norm': 0.0015087597176024238, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 94%|█████████▍| 490/520 [30:43<01:49, 3.65s/it] {'loss': 1.1757, 'grad_norm': 0.0017733749387127936, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 94%|█████████▍| 491/520 [30:47<01:46, 3.66s/it] {'loss': 1.1349, 'grad_norm': 0.001827726825514985, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 95%|█████████▍| 492/520 [30:51<01:42, 3.65s/it] {'loss': 1.2541, 'grad_norm': 0.0018049379400307702, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 95%|█████████▍| 493/520 [30:54<01:38, 3.66s/it] {'loss': 1.2639, 'grad_norm': 0.0018586073138955074, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 95%|█████████▌| 494/520 [30:58<01:34, 3.65s/it] {'loss': 1.1987, 'grad_norm': 0.001639137383745666, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 95%|█████████▌| 495/520 [31:02<01:31, 3.65s/it] {'loss': 1.1511, 'grad_norm': 0.0017845044817163377, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 95%|█████████▌| 496/520 [31:05<01:27, 3.65s/it] {'loss': 1.0759, 'grad_norm': 0.0017943426783809326, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 96%|█████████▌| 497/520 [31:09<01:24, 3.66s/it] {'loss': 1.1634, 'grad_norm': 0.0015180353167847944, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 96%|█████████▌| 498/520 [31:13<01:20, 3.66s/it] {'loss': 1.1525, 'grad_norm': 0.0018004002552187213, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 96%|█████████▌| 499/520 [31:16<01:17, 3.67s/it] {'loss': 1.3065, 'grad_norm': 0.0018394632194405148, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 96%|█████████▌| 500/520 [31:20<01:13, 3.66s/it] {'loss': 1.2709, 'grad_norm': 0.0020519473811755643, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 96%|█████████▋| 501/520 [31:23<01:09, 3.66s/it] {'loss': 1.2134, 'grad_norm': 0.0019103695401836494, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 97%|█████████▋| 502/520 [31:27<01:05, 3.65s/it] {'loss': 1.1953, 'grad_norm': 0.0016337581403707033, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 97%|█████████▋| 503/520 [31:31<01:02, 3.68s/it] {'loss': 1.1967, 'grad_norm': 0.0017804862581599216, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 97%|█████████▋| 504/520 [31:35<00:58, 3.67s/it] {'loss': 1.1907, 'grad_norm': 0.0020830233555271006, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 97%|█████████▋| 505/520 [31:38<00:54, 3.66s/it] {'loss': 1.2284, 'grad_norm': 0.001796505805656371, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 97%|█████████▋| 506/520 [31:42<00:51, 3.66s/it] {'loss': 1.1445, 'grad_norm': 0.0018563066367581421, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 98%|█████████▊| 507/520 [31:45<00:47, 3.66s/it] {'loss': 1.3471, 'grad_norm': 0.0016888674996207236, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 98%|█████████▊| 508/520 [31:49<00:44, 3.68s/it] {'loss': 1.2668, 'grad_norm': 0.0017843455119250446, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 98%|█████████▊| 509/520 [31:53<00:40, 3.68s/it] {'loss': 1.2309, 'grad_norm': 0.0017126348951132893, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 98%|█████████▊| 510/520 [31:57<00:36, 3.68s/it] {'loss': 1.1828, 'grad_norm': 0.0017073019548309691, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 98%|█████████▊| 511/520 [32:00<00:33, 3.68s/it] {'loss': 1.1611, 'grad_norm': 0.0016811212449231337, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 98%|█████████▊| 512/520 [32:04<00:29, 3.68s/it] {'loss': 1.0436, 'grad_norm': 0.001820250260597972, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 99%|█████████▊| 513/520 [32:08<00:25, 3.70s/it] {'loss': 1.2473, 'grad_norm': 0.0020457912904106555, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 99%|█████████▉| 514/520 [32:11<00:22, 3.71s/it] {'loss': 1.2123, 'grad_norm': 0.001663528411427663, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 99%|█████████▉| 515/520 [32:15<00:18, 3.70s/it] {'loss': 1.2624, 'grad_norm': 0.0020152422521012947, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:19<00:14, 3.70s/it] {'loss': 1.1565, 'grad_norm': 0.001671439470027453, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:22<00:11, 3.68s/it] {'loss': 1.2469, 'grad_norm': 0.0017172775913499498, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+100%|█████████▉| 518/520 [32:26<00:07, 3.66s/it] {'loss': 1.1824, 'grad_norm': 0.0017825779963981006, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+100%|█████████▉| 519/520 [32:30<00:03, 3.65s/it] {'loss': 1.2058, 'grad_norm': 0.001717098647369745, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+100%|██████████| 520/520 [32:34<00:00, 3.92s/it] {'loss': 1.2262, 'grad_norm': 0.0016998398817481683, 'learning_rate': 0.0, 'epoch': 1.0}
+{'train_runtime': 1954.7282, 'train_samples_per_second': 34.035, 'train_steps_per_second': 0.266, 'train_loss': 1.3048745036125182, 'epoch': 1.0}
+100%|██████████| 520/520 [32:34<00:00, 3.76s/it]
+[2025-10-13 08:05:52,566] [INFO] [launch.py:348:main] Process 594757 exits successfully.
+[2025-10-13 08:05:53,567] [INFO] [launch.py:348:main] Process 594759 exits successfully.
+[2025-10-13 08:05:53,568] [INFO] [launch.py:348:main] Process 594760 exits successfully.
+[2025-10-13 08:05:53,568] [INFO] [launch.py:348:main] Process 594758 exits successfully.
+[2025-10-13 08:05:53,568] [INFO] [launch.py:348:main] Process 594755 exits successfully.
+[2025-10-13 08:05:54,570] [INFO] [launch.py:348:main] Process 594756 exits successfully.
+[2025-10-13 08:05:54,570] [INFO] [launch.py:348:main] Process 594754 exits successfully.
+[2025-10-13 08:05:58,575] [INFO] [launch.py:348:main] Process 594753 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.5_2e-1_connector-3.0_1.5_2e-1_ablation_20251013_073153.log
+Timestamp: 2025-10-13 08:06:01
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation_20251013_080601.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation_20251013_080601.log
new file mode 100644
index 0000000000000000000000000000000000000000..99a9ceeb7a1e194e4cfc57a7393dcbfbc410f31d
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation_20251013_080601.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation_20251013_080601.log
+Timestamp: 2025-10-13 08:06:01
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 08:06:03,880] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:06,776] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 08:06:06,777] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 1.7 --temperature_mlp_text 1.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 1.7 --temperature_mlp_vision 1.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 1.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 08:06:09,362] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:10,394] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 08:06:10,394] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 08:06:10,394] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 08:06:10,394] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 08:06:10,394] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 08:06:10,394] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 08:06:10,395] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 08:06:10,397] [INFO] [launch.py:253:main] process 614873 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,399] [INFO] [launch.py:253:main] process 614874 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,401] [INFO] [launch.py:253:main] process 614877 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,403] [INFO] [launch.py:253:main] process 614878 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,406] [INFO] [launch.py:253:main] process 614879 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,408] [INFO] [launch.py:253:main] process 614882 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,410] [INFO] [launch.py:253:main] process 614883 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 08:06:10,412] [INFO] [launch.py:253:main] process 614884 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 08:06:17,171] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,408] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,436] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,453] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,456] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,467] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,488] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,488] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 08:06:17,597] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,832] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,846] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,858] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,859] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 08:06:17,881] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,908] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,908] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 08:06:17,911] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.7, 'temperature_mlp': 1.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.7, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.7,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.7,
+    "temperature_mlp": 1.7,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:614873:614873 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614873:614873 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614873:614873 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614873:614873 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614873:614873 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614873:614873 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:614883:614883 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614883:614883 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614883:614883 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614883:614883 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614883:614883 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614883:614883 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614879:614879 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614879:614879 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614879:614879 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614879:614879 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614879:614879 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614879:614879 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614884:614884 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614884:614884 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614884:614884 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614884:614884 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614884:614884 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614884:614884 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614882:614882 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614882:614882 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614882:614882 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614882:614882 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614882:614882 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614882:614882 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614878:614878 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614878:614878 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614878:614878 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614878:614878 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614878:614878 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614878:614878 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614877:614877 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614877:614877 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614877:614877 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614874:614874 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:614874:614874 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614874:614874 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614877:614877 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614877:614877 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614877:614877 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:614874:614874 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:614874:614874 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:614874:614874 [1] NCCL INFO NET/Plugin: Using internal network plugin.
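The bootstrap lines above show each rank pinning its socket interface via NCCL_SOCKET_IFNAME and falling back to NCCL's internal network plugin because libnccl-net.so is absent (harmless on a single node). A hedged sketch of the launcher-side setup that would produce this, using standard torch.distributed calls (the actual TinyLLaVA launch script is not shown in this log):

    # Sketch of typical env + process-group setup behind the NCCL bootstrap above.
    import os
    import torch
    import torch.distributed as dist

    os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")  # "set by environment to eth"
    os.environ.setdefault("NCCL_DEBUG", "INFO")         # source of the NCCL INFO lines

    local_rank = int(os.environ["LOCAL_RANK"])          # provided by torchrun/deepspeed
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")             # kicks off ncclCommInitRank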
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO ncclCommInitRank comm 0x55947b82b700 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO ncclCommInitRank comm 0x55fc96726be0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO ncclCommInitRank comm 0x563847476760 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO ncclCommInitRank comm 0x55ad3f5933c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO ncclCommInitRank comm 0x5597346d8370 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO ncclCommInitRank comm 0x55b51d99c780 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO ncclCommInitRank comm 0x55e0f6c6e600 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO ncclCommInitRank comm 0x561874a508e0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x9ad4c89e4b21954 - Init START
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO NVLS multicast support is not available on dev 4
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO NVLS multicast support is not available on dev 2
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO NVLS multicast support is not available on dev 5
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO comm 0x55ad3f5933c0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO comm 0x561874a508e0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO comm 0x55fc96726be0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO comm 0x55e0f6c6e600 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO comm 0x55b51d99c780 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO comm 0x563847476760 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO comm 0x55947b82b700 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO comm 0x5597346d8370 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
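The channel lines here show NCCL wiring its 24 ring channels plus tree links between the 8 local GPUs over P2P/CUMEM. Once initialization completes, a one-tensor all-reduce is a cheap way to confirm the communicator actually works; a hedged sketch (the script name is hypothetical, launched e.g. with torchrun --nproc_per_node=8 nccl_sanity_check.py):

    # Sanity-check sketch: one all-reduce over the 8-rank NCCL communicator.
    import torch
    import torch.distributed as dist

    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)

    x = torch.ones(1, device="cuda") * rank
    dist.all_reduce(x, op=dist.ReduceOp.SUM)   # rides the rings/trees built above
    assert x.item() == sum(range(dist.get_world_size()))  # 0+1+...+7 == 28
    dist.destroy_process_group()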
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614882:616525 [5] NCCL INFO ncclCommInitRank comm 0x561874a508e0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614884:616524 [7] NCCL INFO ncclCommInitRank comm 0x55947b82b700 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614878:616526 [3] NCCL INFO ncclCommInitRank comm 0x55b51d99c780 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614874:616528 [1] NCCL INFO ncclCommInitRank comm 0x563847476760 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614883:616522 [6] NCCL INFO ncclCommInitRank comm 0x55ad3f5933c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614879:616523 [4] NCCL INFO ncclCommInitRank comm 0x55e0f6c6e600 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614873:616521 [0] NCCL INFO ncclCommInitRank comm 0x55fc96726be0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:614877:616527 [2] NCCL INFO ncclCommInitRank comm 0x5597346d8370 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x9ad4c89e4b21954 - Init COMPLETE
+[2025-10-13 08:07:01,117] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
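+The NCCL lines above show the eight ranks on this node building P2P channels, failing to find the optional libnccl-tuner.so plugin (an informational message, not an error; NCCL falls back to its internal tuner), and completing ncclCommInitRank. A minimal, hypothetical sketch of the same bring-up under torch.distributed; the script name and tensor contents are illustrative and not taken from this run:
+
+    # sanity_nccl.py -- illustrative sketch: one rank per GPU, NCCL backend.
+    import os
+    import torch
+    import torch.distributed as dist
+
+    def main():
+        local_rank = int(os.environ["LOCAL_RANK"])   # set by torchrun
+        torch.cuda.set_device(local_rank)            # bind one CUDA device per rank
+        dist.init_process_group(backend="nccl")      # emits ncclCommInitRank lines like those above
+        x = torch.ones(1, device="cuda")
+        dist.all_reduce(x)                           # sum across ranks; expect world_size
+        if dist.get_rank() == 0:
+            print(f"all_reduce -> {x.item()} (world size {dist.get_world_size()})")
+        dist.destroy_process_group()
+
+    if __name__ == "__main__":
+        main()
+
+Launched as `NCCL_DEBUG=INFO torchrun --nproc_per_node=8 sanity_nccl.py`, this would print channel, tuner, and "Init COMPLETE" lines in the same format as the log above.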
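+The warning below, emitted identically by each of the eight ranks, is expected for this evaluation rather than a sign of a broken checkpoint: every attention and MLP projection carries an extra per-weight `scores` parameter that the plain pretrain checkpoint does not contain, so `from_pretrained` initializes those tensors fresh. A rough sketch of the pattern, with a hypothetical class name and initialization (the repository's actual module differs):
+
+    # ScoredLinear -- illustrative only; shows why '*.scores' keys are absent from a
+    # checkpoint saved for vanilla nn.Linear layers and therefore get newly initialized.
+    import torch
+    import torch.nn as nn
+
+    class ScoredLinear(nn.Linear):
+        def __init__(self, in_features, out_features, bias=True):
+            super().__init__(in_features, out_features, bias=bias)
+            # extra learnable tensor, one score per weight entry (not in the checkpoint)
+            self.scores = nn.Parameter(torch.empty_like(self.weight))
+            nn.init.kaiming_uniform_(self.scores, a=5 ** 0.5)
+
+        def forward(self, x):
+            # soft mask: squash scores into (0, 1) and gate the weights before the matmul
+            return nn.functional.linear(x, self.weight * torch.sigmoid(self.scores), self.bias)
+
+Because the saved state_dict has no `*.scores` entries, the loader reports one newly initialized `scores` tensor per q/k/v/o and gate/up/down projection in each of the 24 layers, which is exactly the list enumerated below.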
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 09:16:42,601] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
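The checkpoint directory stores the three components separately (language_model/, vision_tower/, connector/pytorch_model.bin), and each of the eight distributed ranks logs its own copy of every load message, which is why the lines repeat; the partition_parameters.py entry is DeepSpeed ZeRO-3 finishing its sharded initialization of the 907 parameter tensors (1.42B elements). A sketch of how such a split checkpoint could be reloaded by hand (hypothetical stand-alone code; TinyLLaVA's own loader wraps this and additionally swaps in the masked layer classes):

import os
import torch
from transformers import AutoModelForCausalLM, SiglipVisionModel

ckpt = "/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain"
# Language model and vision tower are full Transformers checkpoints in
# their own subfolders, loadable independently.
language_model = AutoModelForCausalLM.from_pretrained(os.path.join(ckpt, "language_model"))
vision_tower = SiglipVisionModel.from_pretrained(os.path.join(ckpt, "vision_tower"))
# The connector is a plain state_dict for the 2-layer MLP projector,
# matching the "Loading connector from .../pytorch_model.bin" lines above.
connector_state = torch.load(os.path.join(ckpt, "connector", "pytorch_model.bin"), map_location="cpu")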
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 09:17:00,685 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 09:17:00,689 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
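The per-tensor counts above, and continuing below for layers 19-23 plus the two connector layers, follow directly from the weight shapes in the model dump (hidden size 896, KV projections to 128, MLP intermediate 4864, connector input 1152), and they sum exactly to the reported trainable total. A quick check of the arithmetic in plain Python (illustrative, not part of the log):

# Score tensors have one entry per weight, so counts mirror the weight shapes.
hidden, kv, inter = 896, 128, 4864
assert hidden * hidden == 802816       # q_proj / o_proj scores
assert hidden * kv == 114688           # k_proj / v_proj scores
assert hidden * inter == 4358144       # gate/up/down_proj scores
per_layer = 2 * hidden * hidden + 2 * hidden * kv + 3 * hidden * inter
connector = 1152 * 896 + 896 * 896     # the two masked connector layers
total = 24 * per_layer + connector
assert total == 359661568              # "Total Trainable Parameters" above
assert int(0.10 * 665298) == 66529     # the 10% sampled training subset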
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5
[14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614874:622502 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 
2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614874:622502 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] 
via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:622502 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:622502 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614874:622502 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+[NCCL INFO, condensed: ranks 0-7 on ywang29-vrdb-test1-worker-0 complete the forward ring connections r -> (r+1) mod 8 over channels 00/0-23/0, all via P2P/CUMEM/read]
+[NCCL INFO, condensed: each rank reports "Connected all rings" and then opens the reverse-direction connections r -> r-1 over the same 24 channels, again via P2P/CUMEM/read]
+[NCCL INFO, condensed: ranks report "Connected all trees", each with threadThresholds 8/8/64 | 64/8/64 | 512 | 512 and 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer]
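+A minimal sketch, assuming only the line format condensed above: the P2P ring edges can be recovered from NCCL "Channel" messages with a regex. The sample lines are copied from this log; the regex and variable names are illustrative only, not part of the original run.
+import re
+
+# Representative NCCL lines of the shape condensed above (copied from this log).
+samples = [
+    "ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read",
+    "ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read",
+    "ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read",
+]
+
+edge_re = re.compile(r"Channel (\d+)/0 : (\d+)\[\d+\] -> (\d+)\[\d+\] via (\S+)")
+for line in samples:
+    m = edge_re.search(line)
+    if m:
+        channel, src, dst, transport = m.groups()
+        # e.g. "channel 04: rank 0 -> rank 1 via P2P/CUMEM/read"
+        print(f"channel {channel}: rank {src} -> rank {dst} via {transport}")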
+[NCCL INFO, condensed: the remaining ranks report "Connected all trees" with the same threadThresholds and channel counts as above]
+ywang29-vrdb-test1-worker-0:614879:622501 [4] NCCL INFO ncclCommInitRank comm 0x7f195806b010 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614883:622504 [6] NCCL INFO ncclCommInitRank comm 0x7fe82c06b330 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614878:622505 [3] NCCL INFO ncclCommInitRank comm 0x7f8a5406b2d0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614877:622507 [2] NCCL INFO ncclCommInitRank comm 0x7fe44006ad20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614873:622500 [0] NCCL INFO ncclCommInitRank comm 0x7fe02006b110 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614874:622502 [1] NCCL INFO ncclCommInitRank comm 0x7f122c06ac20 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614882:622503 [5] NCCL INFO ncclCommInitRank comm 0x7f9d8806b300 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:614884:622506 [7] NCCL INFO ncclCommInitRank comm 0x7efcb006af90 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xffc7d0a01deb1610 - Init COMPLETE
+[tqdm progress bars deduplicated below to one metrics line per optimizer step; wall time was ~14 s for step 1, then ~3.6-4.3 s per step]
+   1/520  {'loss': 2.4447, 'grad_norm': 0.053738708753500834, 'learning_rate': 0.0125, 'epoch': 0.0}
+   2/520  {'loss': 2.3559, 'grad_norm': 0.05289291417356638, 'learning_rate': 0.025, 'epoch': 0.0}
+   3/520  {'loss': 1.8736, 'grad_norm': 0.02532290066450769, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+   4/520  {'loss': 1.6793, 'grad_norm': 0.009507395853379667, 'learning_rate': 0.05, 'epoch': 0.01}
+   5/520  {'loss': 1.758, 'grad_norm': 0.02054888721952204, 'learning_rate': 0.0625, 'epoch': 0.01}
+   6/520  {'loss': 1.5327, 'grad_norm': 0.009948815796951399, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+   7/520  {'loss': 1.4859, 'grad_norm': 0.008723021392441709, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+   8/520  {'loss': 1.5225, 'grad_norm': 0.006563012306257258, 'learning_rate': 0.1, 'epoch': 0.02}
+   9/520  {'loss': 1.6056, 'grad_norm': 0.007485608112242399, 'learning_rate': 0.1125, 'epoch': 0.02}
+  10/520  {'loss': 1.4131, 'grad_norm': 0.005394357306691611, 'learning_rate': 0.125, 'epoch': 0.02}
+  11/520  {'loss': 1.4972, 'grad_norm': 0.0051657293479672175, 'learning_rate': 0.1375, 'epoch': 0.02}
+  12/520  {'loss': 1.4326, 'grad_norm': 0.00480129373568015, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-13 09:18:06,887] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+  13/520  {'loss': 1.4404, 'grad_norm': 0.0037627110427410656, 'learning_rate': 0.1625, 'epoch': 0.03}
+  14/520  {'loss': 1.5092, 'grad_norm': 0.005056006554394836, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+  15/520  {'loss': 1.4944, 'grad_norm': 0.0035262972432330655, 'learning_rate': 0.1875, 'epoch': 0.03}
+  16/520  {'loss': 1.4563, 'grad_norm': 0.004597333657489717, 'learning_rate': 0.2, 'epoch': 0.03}
+  17/520  {'loss': 1.5544, 'grad_norm': 0.0034327380080559082, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+  18/520  {'loss': 1.4267, 'grad_norm': 0.0042487890945069225, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+  19/520  {'loss': 1.4841, 'grad_norm': 0.00401001190110444, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+  20/520  {'loss': 1.4584, 'grad_norm': 0.005991511268953731, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+  21/520  {'loss': 1.4847, 'grad_norm': 0.004318763521106519, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+  22/520  {'loss': 1.5776, 'grad_norm': 0.0037761005374172584, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+  23/520  {'loss': 1.5081, 'grad_norm': 0.0037783267007638946, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+  24/520  {'loss': 1.4726, 'grad_norm': 0.0036741562300719615, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
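+The stage3 warning above recommends flushing the allocator cache on all ranks at the same time. A minimal sketch of that remedy, assuming a DeepSpeed setup; `engine`, `train_loader`, and the flush interval of 50 are hypothetical placeholders, not taken from this run.
+from deepspeed.accelerator import get_accelerator
+
+# Hypothetical loop: `engine` stands in for a deepspeed.initialize(...) engine
+# and `train_loader` for an iterable of batches; neither comes from this log.
+for step, batch in enumerate(train_loader):
+    loss = engine(batch)
+    engine.backward(loss)
+    engine.step()
+    if step % 50 == 0:
+        # Empty the CUDA caching allocator on every rank at the same step,
+        # as the warning suggests, so ranks do not stall one another.
+        get_accelerator().empty_cache()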
+  25/520  {'loss': 1.5105, 'grad_norm': 0.0034059511288524404, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+  26/520  {'loss': 1.5136, 'grad_norm': 0.003572063136884613, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+  27/520  {'loss': 1.4213, 'grad_norm': 0.003177467145939878, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+  28/520  {'loss': 1.4134, 'grad_norm': 0.003192129261260015, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+  29/520  {'loss': 1.4315, 'grad_norm': 0.003363383997366308, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+  30/520  {'loss': 1.5557, 'grad_norm': 0.0030925174714790823, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+  31/520  {'loss': 1.4147, 'grad_norm': 0.003023173766513002, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+  32/520  {'loss': 1.456, 'grad_norm': 0.004412769963567445, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+  33/520  {'loss': 1.4289, 'grad_norm': 0.003146509446926701, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+  34/520  {'loss': 1.415, 'grad_norm': 0.00356066160058164, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+  35/520  {'loss': 1.4305, 'grad_norm': 0.0034406199798018952, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+  36/520  {'loss': 1.5355, 'grad_norm': 0.0025697806744052, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+  37/520  {'loss': 1.5388, 'grad_norm': 0.0033885144423858936, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+  38/520  {'loss': 1.6107, 'grad_norm': 0.0029085674470016075, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+  39/520  {'loss': 1.4391, 'grad_norm': 0.003128856745703345, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+  40/520  {'loss': 1.4825, 'grad_norm': 0.002682512114353413, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+  41/520  {'loss': 1.4485, 'grad_norm': 0.0028879505896506497, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+  42/520  {'loss': 1.4824, 'grad_norm': 0.0037304039978415223, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+  43/520  {'loss': 1.4475, 'grad_norm': 0.003407133108002863, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+  44/520  {'loss': 1.5404, 'grad_norm': 0.0026815514150334355, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+  45/520  {'loss': 1.4854, 'grad_norm': 0.0030090870172361666, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+  46/520  {'loss': 1.6197, 'grad_norm': 0.0029684339902482084, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+  47/520  {'loss': 1.4718, 'grad_norm': 0.0026266288408808252, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+  48/520  {'loss': 1.435, 'grad_norm': 0.0030499615708563066, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+  49/520  {'loss': 1.4862, 'grad_norm': 0.002666507505070157, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+  50/520  {'loss': 1.4696, 'grad_norm': 0.0025290397183982765, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+  51/520  {'loss': 1.3971, 'grad_norm': 0.0026897755979682436, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+  52/520  {'loss': 1.5374, 'grad_norm': 0.003082246988665854, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+  53/520  {'loss': 1.5309, 'grad_norm': 0.0028837905083424153, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+  54/520  {'loss': 1.4242, 'grad_norm': 0.0027229754165965987, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+  55/520  {'loss': 1.4039, 'grad_norm': 0.0028440685072718378, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+  56/520  {'loss': 1.5436, 'grad_norm': 0.0027209282747658672, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+  57/520  {'loss': 1.385, 'grad_norm': 0.0036575494169736864, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+  58/520  {'loss': 1.5506, 'grad_norm': 0.002512773327102663, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+  59/520  {'loss': 1.4044, 'grad_norm': 0.0030407236624533616, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+  60/520  {'loss': 1.4916, 'grad_norm': 0.004318516809464557, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+  61/520  {'loss': 1.5209, 'grad_norm': 0.0052467522445487184, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+  62/520  {'loss': 1.4474, 'grad_norm': 0.00275559498413132, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+  63/520  {'loss': 1.4413, 'grad_norm': 0.003739543488266356, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+  64/520  {'loss': 1.4725, 'grad_norm': 0.002635472578738635, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+  65/520  {'loss': 1.4694, 'grad_norm': 0.002710736808069223, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+  66/520  {'loss': 1.4398, 'grad_norm': 0.004041290191811826, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+  67/520  {'loss': 1.3232, 'grad_norm': 0.0023527974903564458, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+  68/520  {'loss': 1.3797, 'grad_norm': 0.0024282736292985884, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+  69/520  {'loss': 1.3719, 'grad_norm': 0.004090238778689172, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+  70/520  {'loss': 1.4097, 'grad_norm': 0.002875983665742504, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+  71/520  {'loss': 1.3412, 'grad_norm': 0.0027543043376780938, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+  72/520  {'loss': 1.4924, 'grad_norm': 0.0034120174949248292, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+  73/520  {'loss': 1.3164, 'grad_norm': 0.002512822109672824, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+  74/520  {'loss': 1.4354, 'grad_norm': 0.002687011545291524, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+  75/520  {'loss': 1.3389, 'grad_norm': 0.002981156596291579, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+  76/520  {'loss': 1.8998, 'grad_norm': 0.013437230187122116, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+  77/520  {'loss': 1.2677, 'grad_norm': 0.002838196310783489, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+  78/520  {'loss': 1.3961, 'grad_norm': 0.0027912791643533246, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+  79/520  {'loss': 1.3765, 'grad_norm': 0.002423403261084094, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+  80/520  {'loss': 1.6371, 'grad_norm': 0.006405423676254936, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+  81/520  {'loss': 1.5358, 'grad_norm': 0.0037887641436403197, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+  82/520  {'loss': 1.4518, 'grad_norm': 0.002564352478287506, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
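+The metric lines above are Python dict literals, so they can be pulled out of a log file like this one with a regex plus ast.literal_eval. A minimal sketch; `log_text` is a one-line stand-in for reading the real file, with the dict copied from step 9 above and the tqdm prefix shaped like the trainer's raw output.
+import ast
+import re
+
+# Stand-in for open(log_path).read(); the dict values come from step 9 above.
+log_text = "2%| 9/520 [00:45<36:35, 4.30s/it] {'loss': 1.6056, 'grad_norm': 0.007485608112242399, 'learning_rate': 0.1125, 'epoch': 0.02}"
+
+records = [ast.literal_eval(m) for m in re.findall(r"\{'loss'.*?\}", log_text)]
+print(records[0]['loss'])  # -> 1.6056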
+  83/520  {'loss': 1.4878, 'grad_norm': 0.002970711846785015, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+  84/520  {'loss': 1.4846, 'grad_norm': 0.0030360086698465404, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+  85/520  {'loss': 1.5043, 'grad_norm': 0.002840598270023005, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+  86/520  {'loss': 1.524, 'grad_norm': 0.0031834023163822777, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+  87/520  {'loss': 1.6711, 'grad_norm': 0.009077739004798902, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+  88/520  {'loss': 1.6703, 'grad_norm': 0.0064822686118107915, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
+  89/520  {'loss': 1.4603, 'grad_norm': 0.00273907958106406, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+  90/520  {'loss': 1.3874, 'grad_norm': 0.0025208932804595308, 'learning_rate': 0.1895489924657301, 'epoch': 0.17}
+  91/520  {'loss': 1.4734, 'grad_norm': 0.002727409529259102, 'learning_rate': 0.18926982092436118, 'epoch': 0.17}
+  92/520  {'loss': 1.4032, 'grad_norm': 0.0027349979839366607, 'learning_rate': 0.18898718088114688, 'epoch': 0.18}
+  93/520  {'loss': 1.4125, 'grad_norm': 0.002470234733130641, 'learning_rate': 0.18870108331782218, 'epoch': 0.18}
+  94/520  {'loss': 1.5214, 'grad_norm': 0.0036227549009659704, 'learning_rate': 0.18841153935046098, 'epoch': 0.18}
+  95/520  {'loss': 1.3924, 'grad_norm': 0.0030879899428530623, 'learning_rate': 0.18811856022904425, 'epoch': 0.18}
+  96/520  {'loss': 1.3974, 'grad_norm': 0.0022770909968103463, 'learning_rate': 0.18782215733702287, 'epoch': 0.18}
+  97/520  {'loss': 1.3745, 'grad_norm': 0.002728858050724923, 'learning_rate': 0.18752234219087538, 'epoch': 0.19}
+  98/520  {'loss': 1.3662, 'grad_norm': 0.0022258686999810607, 'learning_rate': 0.18721912643966054, 'epoch': 0.19}
+  99/520  {'loss': 1.3964, 'grad_norm': 0.002680731416023072, 'learning_rate': 0.18691252186456464, 'epoch': 0.19}
+ 100/520  {'loss': 1.6548, 'grad_norm': 0.005399315537933914, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 101/520  {'loss': 1.3861, 'grad_norm': 0.00267382226572801, 'learning_rate': 0.18628919402536132, 'epoch': 0.19}
+ 102/520  {'loss': 1.3944, 'grad_norm': 0.00244317271204577, 'learning_rate': 0.18597249498011903, 'epoch': 0.2}
+ 103/520  {'loss': 1.3199, 'grad_norm': 0.0022116926226281035, 'learning_rate': 0.18565245554778517, 'epoch': 0.2}
+ 104/520  {'loss': 1.4044, 'grad_norm': 0.0024270265406187014, 'learning_rate': 0.18532908816321558, 'epoch': 0.2}
+ 105/520  {'loss': 1.398, 'grad_norm': 0.0022800971565236266, 'learning_rate': 0.18500240539057092, 'epoch': 0.2}
+ 106/520  {'loss': 1.5287, 'grad_norm': 0.004063749105184557, 'learning_rate': 0.18467241992282843, 'epoch': 0.2}
+ 107/520  {'loss': 1.499, 'grad_norm': 0.003059815027338144, 'learning_rate': 0.18433914458128858, 'epoch': 0.21}
+ 108/520  {'loss': 1.3562, 'grad_norm': 0.0027496780881043586, 'learning_rate': 0.18400259231507718, 'epoch': 0.21}
+ 109/520  {'loss': 1.4874, 'grad_norm': 0.002659721687320618, 'learning_rate': 0.18366277620064198, 'epoch': 0.21}
+ 110/520  {'loss': 1.5491, 'grad_norm': 0.0024214450519505527, 'learning_rate': 0.1833197094412449, 'epoch': 0.21}
+ 111/520  {'loss': 1.546, 'grad_norm': 0.002700969453699496, 'learning_rate': 0.18297340536644877, 'epoch': 0.21}
+ 112/520  {'loss': 1.4345, 'grad_norm': 0.0023545775066370223, 'learning_rate': 0.1826238774315995, 'epoch': 0.22}
+ 113/520  {'loss': 1.3028, 'grad_norm': 0.0020325651896318134, 'learning_rate': 0.18227113921730334, 'epoch': 0.22}
+ 114/520  {'loss': 1.4042, 'grad_norm': 0.002329511098294474, 'learning_rate': 0.1819152044288992, 'epoch': 0.22}
+ 115/520  {'loss': 1.5238, 'grad_norm': 0.0022158439001146455, 'learning_rate': 0.18155608689592603, 'epoch': 0.22}
+ 116/520  {'loss': 1.5159, 'grad_norm': 0.0021989365925594224, 'learning_rate': 0.18119380057158568, 'epoch': 0.22}
+ 117/520  {'loss': 1.5015, 'grad_norm': 0.002726467334288996, 'learning_rate': 0.18082835953220056, 'epoch': 0.23}
+ 118/520  {'loss': 1.3735, 'grad_norm': 0.0020705807310270993, 'learning_rate': 0.18045977797666685, 'epoch': 0.23}
+ 119/520  {'loss': 1.3299, 'grad_norm': 0.002221707219896404, 'learning_rate': 0.1800880702259028, 'epoch': 0.23}
+ 120/520  {'loss': 1.3481, 'grad_norm': 0.002608415771636993, 'learning_rate': 0.17971325072229227, 'epoch': 0.23}
+ 121/520  {'loss': 1.4168, 'grad_norm': 0.002592075621776868, 'learning_rate': 0.17933533402912352, 'epoch': 0.23}
+ 122/520  {'loss': 1.3019, 'grad_norm': 0.002230770333500607, 'learning_rate': 0.17895433483002354, 'epoch': 0.23}
+ 123/520  {'loss': 1.5731, 'grad_norm': 0.003267065263596455, 'learning_rate': 0.17857026792838737, 'epoch': 0.24}
+ 124/520  {'loss': 1.3903, 'grad_norm': 0.0025276975493335996, 'learning_rate': 0.178183148246803, 'epoch': 0.24}
+ 125/520  {'loss': 1.3794, 'grad_norm': 0.0022867469284970567, 'learning_rate': 0.1777929908264715, 'epoch': 0.24}
+ 126/520  {'loss': 1.4865, 'grad_norm': 0.0025394474072004537, 'learning_rate': 0.17739981082662276, 'epoch': 0.24}
+ 127/520  {'loss': 1.3549, 'grad_norm': 0.0028579224559599436, 'learning_rate': 0.1770036235239263, 'epoch': 0.24}
+ 128/520  {'loss': 1.412, 'grad_norm': 0.0022834419150050303, 'learning_rate': 0.1766044443118978, 'epoch': 0.25}
+ 129/520  {'loss': 1.3303, 'grad_norm': 0.0018340287096081953, 'learning_rate': 0.17620228870030108, 'epoch': 0.25}
+ 130/520  {'loss': 1.3984, 'grad_norm': 0.0021351855319979157, 'learning_rate': 0.1757971723145453, 'epoch': 0.25}
+ 131/520  {'loss': 1.406, 'grad_norm': 0.002549713312360759, 'learning_rate': 0.175389110895078, 'epoch': 0.25}
+ 132/520  {'loss': 1.4358, 'grad_norm': 0.002287142097047306, 'learning_rate': 0.17497812029677343, 'epoch': 0.25}
+ 133/520  {'loss': 1.3414, 'grad_norm': 0.002292304657927264, 'learning_rate': 0.17456421648831655, 'epoch': 0.26}
+ 134/520  {'loss': 1.4205, 'grad_norm': 0.0024989405277847957, 'learning_rate': 0.17414741555158267, 'epoch': 0.26}
+ 135/520  {'loss': 1.4934, 'grad_norm': 0.002220082993402579, 'learning_rate': 0.1737277336810124, 'epoch': 0.26}
+ 136/520  {'loss': 1.4094, 'grad_norm': 0.002507157795542203, 'learning_rate': 0.17330518718298263, 'epoch': 0.26}
+ 137/520  {'loss': 1.3335, 'grad_norm': 0.0024720053008432195, 'learning_rate': 0.17287979247517285, 'epoch': 0.26}
+ 138/520  {'loss': 1.3469, 'grad_norm': 0.002150569464088381, 'learning_rate': 0.17245156608592727, 'epoch': 0.27}
+ 139/520  {'loss': 1.3046, 'grad_norm': 0.0023050687682746413, 'learning_rate': 0.17202052465361267, 'epoch': 0.27}
+ 140/520  {'loss': 1.449, 'grad_norm': 0.0025115359843373287, 'learning_rate': 0.17158668492597184, 'epoch': 0.27}
+ 141/520  {'loss': 1.4613, 'grad_norm': 0.0021827900879489446, 'learning_rate': 0.17115006375947303, 'epoch': 0.27}
+ 142/520  {'loss': 1.4958, 'grad_norm': 0.0021162332206560064, 'learning_rate': 0.17071067811865476, 'epoch': 0.27}
+ 143/520  {'loss': 1.3857, 'grad_norm': 0.002479595541820381, 'learning_rate': 0.17026854507546693, 'epoch': 0.28}
+ 144/520  {'loss': 1.3284, 'grad_norm': 0.002180154578251579, 'learning_rate': 0.1698236818086073, 'epoch': 0.28}
+ 145/520  {'loss': 1.2709, 'grad_norm': 0.0019917128085259347, 'learning_rate': 0.16937610560285418, 'epoch': 0.28}
+ 146/520  {'loss': 1.5194, 'grad_norm': 0.0023614646913921334, 'learning_rate': 0.1689258338483947, 'epoch': 0.28}
+ 147/520  {'loss': 1.3119, 'grad_norm': 0.0021963241486739112, 'learning_rate': 0.16847288404014937, 'epoch': 0.28}
+ 148/520  {'loss': 1.3454, 'grad_norm': 0.0020608937110805875, 'learning_rate': 0.16801727377709194, 'epoch': 0.28}
+ 149/520  {'loss': 1.297, 'grad_norm': 0.0022365897997525773, 'learning_rate': 0.16755902076156604, 'epoch': 0.29}
+ 150/520  {'loss': 1.5301, 'grad_norm': 0.002319918842404785, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 151/520  {'loss': 1.339, 'grad_norm': 0.0021046988965050563, 'learning_rate': 0.1666346577952004, 'epoch': 0.29}
+ 152/520  {'loss': 1.3028, 'grad_norm': 0.0021749371269856733, 'learning_rate': 0.16616858375968596, 'epoch': 0.29}
+ 153/520  {'loss': 1.3361, 'grad_norm': 0.002199274072422491, 'learning_rate': 0.16569993880095807, 'epoch': 0.29}
+ 154/520  {'loss': 1.4315, 'grad_norm': 0.0021357112218230315, 'learning_rate': 0.16522874112781213, 'epoch': 0.3}
+ 155/520  {'loss': 1.3403, 'grad_norm': 0.002253959416829382, 'learning_rate': 0.16475500904822704, 'epoch': 0.3}
+ 156/520  {'loss': 1.3661, 'grad_norm': 0.002292767582630908, 'learning_rate': 0.16427876096865393, 'epoch': 0.3}
+ 157/520  {'loss': 1.525, 'grad_norm': 0.002586538058869034, 'learning_rate': 0.16380001539330089, 'epoch': 0.3}
[10:01<22:06, 3.67s/it] {'loss': 1.3421, 'grad_norm': 0.00244627137101247, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:01<22:06, 3.67s/it] 31%|███ | 159/520 [10:05<22:00, 3.66s/it] {'loss': 1.3668, 'grad_norm': 0.0020531989594307623, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:05<22:00, 3.66s/it] 31%|███ | 160/520 [10:09<21:55, 3.65s/it] {'loss': 1.3945, 'grad_norm': 0.0022145048855957785, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:09<21:55, 3.65s/it] 31%|███ | 161/520 [10:12<21:53, 3.66s/it] {'loss': 1.3756, 'grad_norm': 0.0021086970224235423, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:12<21:53, 3.66s/it] 31%|███ | 162/520 [10:16<21:50, 3.66s/it] {'loss': 1.4541, 'grad_norm': 0.0022842529291871886, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:16<21:50, 3.66s/it] 31%|███▏ | 163/520 [10:20<21:54, 3.68s/it] {'loss': 1.2581, 'grad_norm': 0.0028910470748054868, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:20<21:54, 3.68s/it] 32%|███▏ | 164/520 [10:23<22:01, 3.71s/it] {'loss': 1.2255, 'grad_norm': 0.002080310492238927, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:23<22:01, 3.71s/it] 32%|███▏ | 165/520 [10:27<22:10, 3.75s/it] {'loss': 1.3561, 'grad_norm': 0.0018799279247966236, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:27<22:10, 3.75s/it] 32%|███▏ | 166/520 [10:31<22:12, 3.76s/it] {'loss': 1.3531, 'grad_norm': 0.002241502739196257, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:31<22:12, 3.76s/it] 32%|███▏ | 167/520 [10:35<22:02, 3.75s/it] {'loss': 1.3597, 'grad_norm': 0.0024308102142674417, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:35<22:02, 3.75s/it] 32%|███▏ | 168/520 [10:38<21:50, 3.72s/it] {'loss': 1.2724, 'grad_norm': 0.0020458929056858632, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:38<21:50, 3.72s/it] 32%|███▎ | 169/520 [10:42<21:39, 3.70s/it] {'loss': 1.3678, 'grad_norm': 0.0021488202229165247, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:42<21:39, 3.70s/it] 33%|███▎ | 170/520 [10:46<21:28, 3.68s/it] {'loss': 1.3702, 'grad_norm': 0.0024244898857918016, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:46<21:28, 3.68s/it] 33%|███▎ | 171/520 [10:49<21:30, 3.70s/it] {'loss': 1.2967, 'grad_norm': 0.00227896559188181, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:49<21:30, 3.70s/it] 33%|███▎ | 172/520 [10:53<21:24, 3.69s/it] {'loss': 1.365, 'grad_norm': 0.0020898796107953997, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:53<21:24, 3.69s/it] 33%|███▎ | 173/520 [10:57<21:19, 3.69s/it] {'loss': 1.2981, 'grad_norm': 0.0020586254180600585, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:57<21:19, 3.69s/it] 33%|███▎ | 174/520 [11:01<21:13, 3.68s/it] {'loss': 1.3705, 'grad_norm': 0.0023259327070305333, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:01<21:13, 3.68s/it] 34%|███▎ | 175/520 [11:04<21:10, 3.68s/it] {'loss': 1.2635, 'grad_norm': 0.0020299408409780056, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:04<21:10, 3.68s/it] 34%|███▍ | 176/520 [11:08<21:02, 3.67s/it] {'loss': 1.4606, 'grad_norm': 0.002169634930550372, 
'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:08<21:02, 3.67s/it] 34%|███▍ | 177/520 [11:12<20:59, 3.67s/it] {'loss': 1.3157, 'grad_norm': 0.0022123496450175153, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:12<20:59, 3.67s/it] 34%|███▍ | 178/520 [11:15<20:51, 3.66s/it] {'loss': 1.3361, 'grad_norm': 0.0022219085450131655, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:15<20:51, 3.66s/it] 34%|███▍ | 179/520 [11:19<20:47, 3.66s/it] {'loss': 1.4307, 'grad_norm': 0.002025974985652731, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:19<20:47, 3.66s/it] 35%|███▍ | 180/520 [11:22<20:41, 3.65s/it] {'loss': 1.339, 'grad_norm': 0.0022403575789600922, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:22<20:41, 3.65s/it] 35%|███▍ | 181/520 [11:26<20:34, 3.64s/it] {'loss': 1.3135, 'grad_norm': 0.0018542814075871, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:26<20:34, 3.64s/it] 35%|███▌ | 182/520 [11:30<20:36, 3.66s/it] {'loss': 1.3192, 'grad_norm': 0.00200204717982365, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:30<20:36, 3.66s/it] 35%|███▌ | 183/520 [11:33<20:35, 3.67s/it] {'loss': 1.3549, 'grad_norm': 0.002204374494381894, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:33<20:35, 3.67s/it] 35%|███▌ | 184/520 [11:37<20:31, 3.66s/it] {'loss': 1.266, 'grad_norm': 0.0022338867204376896, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:37<20:31, 3.66s/it] 36%|███▌ | 185/520 [11:41<20:33, 3.68s/it] {'loss': 1.4375, 'grad_norm': 0.0020943617266014573, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:41<20:33, 3.68s/it] 36%|███▌ | 186/520 [11:44<20:24, 3.67s/it] {'loss': 1.2865, 'grad_norm': 0.001997019784017508, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:44<20:24, 3.67s/it] 36%|███▌ | 187/520 [11:48<20:20, 3.66s/it] {'loss': 1.2995, 'grad_norm': 0.002412914800908527, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:48<20:20, 3.66s/it] 36%|███▌ | 188/520 [11:52<20:28, 3.70s/it] {'loss': 1.3902, 'grad_norm': 0.0021422236068874122, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:52<20:28, 3.70s/it] 36%|███▋ | 189/520 [11:56<20:34, 3.73s/it] {'loss': 1.3834, 'grad_norm': 0.0019484799008360931, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:56<20:34, 3.73s/it] 37%|███▋ | 190/520 [11:59<20:37, 3.75s/it] {'loss': 1.3015, 'grad_norm': 0.002148865379386473, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:59<20:37, 3.75s/it] 37%|███▋ | 191/520 [12:03<20:39, 3.77s/it] {'loss': 1.2583, 'grad_norm': 0.0019359265116042333, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:03<20:39, 3.77s/it] 37%|███▋ | 192/520 [12:07<20:40, 3.78s/it] {'loss': 1.3541, 'grad_norm': 0.0020963001834048843, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:07<20:40, 3.78s/it] 37%|███▋ | 193/520 [12:11<20:39, 3.79s/it] {'loss': 1.3822, 'grad_norm': 0.0025279667286241874, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:11<20:39, 3.79s/it] 37%|███▋ | 194/520 [12:15<20:35, 3.79s/it] {'loss': 1.2594, 'grad_norm': 0.0020076134200872186, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 
[12:15<20:35, 3.79s/it] 38%|███▊ | 195/520 [12:19<20:32, 3.79s/it] {'loss': 1.3666, 'grad_norm': 0.002174310143983626, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:19<20:32, 3.79s/it] 38%|███▊ | 196/520 [12:22<20:32, 3.80s/it] {'loss': 1.3325, 'grad_norm': 0.0021908514588453703, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:22<20:32, 3.80s/it] 38%|███▊ | 197/520 [12:26<20:28, 3.80s/it] {'loss': 1.285, 'grad_norm': 0.0019561647020829615, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:26<20:28, 3.80s/it] 38%|███▊ | 198/520 [12:30<20:28, 3.82s/it] {'loss': 1.36, 'grad_norm': 0.002148813420833637, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:30<20:28, 3.82s/it] 38%|███▊ | 199/520 [12:34<20:22, 3.81s/it] {'loss': 1.2795, 'grad_norm': 0.0022257120335628135, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:34<20:22, 3.81s/it] 38%|███▊ | 200/520 [12:38<20:24, 3.83s/it] {'loss': 1.3202, 'grad_norm': 0.002149618492747803, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:38<20:24, 3.83s/it] 39%|███▊ | 201/520 [12:41<20:19, 3.82s/it] {'loss': 1.3273, 'grad_norm': 0.001881744389130323, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:41<20:19, 3.82s/it] 39%|███▉ | 202/520 [12:45<20:14, 3.82s/it] {'loss': 1.2774, 'grad_norm': 0.0019549845063532418, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:45<20:14, 3.82s/it] 39%|███▉ | 203/520 [12:49<20:13, 3.83s/it] {'loss': 1.3264, 'grad_norm': 0.002091023473075259, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:49<20:13, 3.83s/it] 39%|███▉ | 204/520 [12:53<20:09, 3.83s/it] {'loss': 1.3537, 'grad_norm': 0.002171340916369968, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:53<20:09, 3.83s/it] 39%|███▉ | 205/520 [12:57<20:10, 3.84s/it] {'loss': 1.3409, 'grad_norm': 0.0022806329987082, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:57<20:10, 3.84s/it] 40%|███▉ | 206/520 [13:01<20:06, 3.84s/it] {'loss': 1.3997, 'grad_norm': 0.0020100058510892547, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:01<20:06, 3.84s/it] 40%|███▉ | 207/520 [13:05<20:04, 3.85s/it] {'loss': 1.3183, 'grad_norm': 0.002201532192168137, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:05<20:04, 3.85s/it] 40%|████ | 208/520 [13:08<19:45, 3.80s/it] {'loss': 1.3698, 'grad_norm': 0.002231545327507531, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:08<19:45, 3.80s/it] 40%|████ | 209/520 [13:12<19:45, 3.81s/it] {'loss': 1.281, 'grad_norm': 0.0019188450580399973, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:12<19:45, 3.81s/it] 40%|████ | 210/520 [13:16<19:45, 3.83s/it] {'loss': 1.3729, 'grad_norm': 0.0023146045531144644, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:16<19:45, 3.83s/it] 41%|████ | 211/520 [13:20<19:44, 3.83s/it] {'loss': 1.369, 'grad_norm': 0.0019390780945014471, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:20<19:44, 3.83s/it] 41%|████ | 212/520 [13:24<19:40, 3.83s/it] {'loss': 1.3419, 'grad_norm': 0.0019105627128096112, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:24<19:40, 3.83s/it] 41%|████ | 213/520 [13:27<19:38, 3.84s/it] {'loss': 1.3042, 
'grad_norm': 0.002336080155491103, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:27<19:38, 3.84s/it] 41%|████ | 214/520 [13:31<19:24, 3.81s/it] {'loss': 1.2947, 'grad_norm': 0.0021083552655161153, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:31<19:24, 3.81s/it] 41%|████▏ | 215/520 [13:35<19:08, 3.76s/it] {'loss': 1.2541, 'grad_norm': 0.0019932568757691457, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:35<19:08, 3.76s/it] 42%|████▏ | 216/520 [13:38<18:53, 3.73s/it] {'loss': 1.2079, 'grad_norm': 0.0020011254986232268, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:38<18:53, 3.73s/it] 42%|████▏ | 217/520 [13:42<18:43, 3.71s/it] {'loss': 1.3322, 'grad_norm': 0.0020517099563657575, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:42<18:43, 3.71s/it] 42%|████▏ | 218/520 [13:46<18:35, 3.69s/it] {'loss': 1.3294, 'grad_norm': 0.0020931961472006487, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:46<18:35, 3.69s/it] 42%|████▏ | 219/520 [13:49<18:25, 3.67s/it] {'loss': 1.3048, 'grad_norm': 0.0017694962462600644, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:49<18:25, 3.67s/it] 42%|████▏ | 220/520 [13:53<18:21, 3.67s/it] {'loss': 1.3035, 'grad_norm': 0.0019545376152107917, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:53<18:21, 3.67s/it] 42%|████▎ | 221/520 [13:57<18:19, 3.68s/it] {'loss': 1.3319, 'grad_norm': 0.002067480314877432, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:57<18:19, 3.68s/it] 43%|████▎ | 222/520 [14:00<18:14, 3.67s/it] {'loss': 1.2498, 'grad_norm': 0.0018980289499187686, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:00<18:14, 3.67s/it] 43%|████▎ | 223/520 [14:04<18:09, 3.67s/it] {'loss': 1.2365, 'grad_norm': 0.001825699424497245, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:04<18:09, 3.67s/it] 43%|████▎ | 224/520 [14:08<18:10, 3.68s/it] {'loss': 1.4548, 'grad_norm': 0.003075098189136441, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:08<18:10, 3.68s/it] 43%|████▎ | 225/520 [14:12<18:07, 3.69s/it] {'loss': 1.2572, 'grad_norm': 0.0020000496532963145, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:12<18:07, 3.69s/it] 43%|████▎ | 226/520 [14:15<18:01, 3.68s/it] {'loss': 1.3635, 'grad_norm': 0.0018521152270675827, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:15<18:01, 3.68s/it] 44%|████▎ | 227/520 [14:19<17:59, 3.69s/it] {'loss': 1.34, 'grad_norm': 0.0018587039968784233, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:19<17:59, 3.69s/it] 44%|████▍ | 228/520 [14:23<17:55, 3.68s/it] {'loss': 1.4411, 'grad_norm': 0.0020288014128259997, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:23<17:55, 3.68s/it] 44%|████▍ | 229/520 [14:26<17:49, 3.67s/it] {'loss': 1.3135, 'grad_norm': 0.001770302553748888, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:26<17:49, 3.67s/it] 44%|████▍ | 230/520 [14:30<17:52, 3.70s/it] {'loss': 1.1994, 'grad_norm': 0.0019476977037863582, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:30<17:52, 3.70s/it] 44%|████▍ | 231/520 [14:34<17:45, 3.69s/it] {'loss': 1.2621, 'grad_norm': 0.0017467037380404047, 
'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:34<17:45, 3.69s/it] 45%|████▍ | 232/520 [14:37<17:41, 3.68s/it] {'loss': 1.4749, 'grad_norm': 0.00244783867557142, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:37<17:41, 3.68s/it] 45%|████▍ | 233/520 [14:41<17:37, 3.68s/it] {'loss': 1.3506, 'grad_norm': 0.002153529153615686, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:41<17:37, 3.68s/it] 45%|████▌ | 234/520 [14:45<17:33, 3.68s/it] {'loss': 1.2082, 'grad_norm': 0.001994271594686673, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:45<17:33, 3.68s/it] 45%|████▌ | 235/520 [14:48<17:36, 3.71s/it] {'loss': 1.2717, 'grad_norm': 0.0021098027803143305, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:48<17:36, 3.71s/it] 45%|████▌ | 236/520 [14:52<17:50, 3.77s/it] {'loss': 1.3703, 'grad_norm': 0.001849131829183831, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:52<17:50, 3.77s/it] 46%|████▌ | 237/520 [14:56<17:55, 3.80s/it] {'loss': 1.345, 'grad_norm': 0.001984802927565301, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:56<17:55, 3.80s/it] 46%|████▌ | 238/520 [15:00<17:59, 3.83s/it] {'loss': 1.2756, 'grad_norm': 0.001988005142275661, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:00<17:59, 3.83s/it] 46%|████▌ | 239/520 [15:04<17:56, 3.83s/it] {'loss': 1.3728, 'grad_norm': 0.002037817265043066, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:04<17:56, 3.83s/it] 46%|████▌ | 240/520 [15:08<17:57, 3.85s/it] {'loss': 1.1504, 'grad_norm': 0.001998569633296665, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:08<17:57, 3.85s/it] 46%|████▋ | 241/520 [15:12<17:57, 3.86s/it] {'loss': 1.2394, 'grad_norm': 0.001857560642988802, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:12<17:57, 3.86s/it] 47%|████▋ | 242/520 [15:16<17:45, 3.83s/it] {'loss': 1.2601, 'grad_norm': 0.0018114554358385417, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:16<17:45, 3.83s/it] 47%|████▋ | 243/520 [15:19<17:27, 3.78s/it] {'loss': 1.2459, 'grad_norm': 0.0019078581243754968, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:19<17:27, 3.78s/it] 47%|████▋ | 244/520 [15:23<17:11, 3.74s/it] {'loss': 1.3863, 'grad_norm': 0.0020525669236729368, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:23<17:11, 3.74s/it] 47%|████▋ | 245/520 [15:26<17:02, 3.72s/it] {'loss': 1.2369, 'grad_norm': 0.002049334157659799, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:26<17:02, 3.72s/it] 47%|████▋ | 246/520 [15:30<16:53, 3.70s/it] {'loss': 1.4447, 'grad_norm': 0.0021780716100262953, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:30<16:53, 3.70s/it] 48%|████▊ | 247/520 [15:34<16:46, 3.69s/it] {'loss': 1.4177, 'grad_norm': 0.001985195768074247, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:34<16:46, 3.69s/it] 48%|████▊ | 248/520 [15:37<16:38, 3.67s/it] {'loss': 1.2419, 'grad_norm': 0.002170386174286417, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:37<16:38, 3.67s/it] 48%|████▊ | 249/520 [15:41<16:41, 3.70s/it] {'loss': 1.3411, 'grad_norm': 0.001974101189214741, 'learning_rate': 0.1118156385807593, 
'epoch': 0.48} + 48%|████▊ | 249/520 [15:41<16:41, 3.70s/it] 48%|████▊ | 250/520 [15:45<16:36, 3.69s/it] {'loss': 1.2766, 'grad_norm': 0.002001091822278092, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:45<16:36, 3.69s/it] 48%|████▊ | 251/520 [15:49<16:30, 3.68s/it] {'loss': 1.3444, 'grad_norm': 0.001836774213038678, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:49<16:30, 3.68s/it] 48%|████▊ | 252/520 [15:52<16:27, 3.68s/it] {'loss': 1.3377, 'grad_norm': 0.0020046261195653045, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:52<16:27, 3.68s/it] 49%|████▊ | 253/520 [15:56<16:20, 3.67s/it] {'loss': 1.3454, 'grad_norm': 0.002213951364336443, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:56<16:20, 3.67s/it] 49%|████▉ | 254/520 [16:00<16:16, 3.67s/it] {'loss': 1.265, 'grad_norm': 0.001930793122917883, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:00<16:16, 3.67s/it] 49%|████▉ | 255/520 [16:03<16:12, 3.67s/it] {'loss': 1.2752, 'grad_norm': 0.002123565725700029, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:03<16:12, 3.67s/it] 49%|████▉ | 256/520 [16:07<16:05, 3.66s/it] {'loss': 1.3195, 'grad_norm': 0.0020035436883537867, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:07<16:05, 3.66s/it] 49%|████▉ | 257/520 [16:10<16:02, 3.66s/it] {'loss': 1.3104, 'grad_norm': 0.0019838528942905114, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:10<16:02, 3.66s/it] 50%|████▉ | 258/520 [16:14<15:56, 3.65s/it] {'loss': 1.3186, 'grad_norm': 0.0016935835275143705, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:14<15:56, 3.65s/it] 50%|████▉ | 259/520 [16:18<15:51, 3.65s/it] {'loss': 1.3903, 'grad_norm': 0.002264807468178733, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:18<15:51, 3.65s/it] 50%|█████ | 260/520 [16:21<15:48, 3.65s/it] {'loss': 1.4062, 'grad_norm': 0.001993227059877275, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:21<15:48, 3.65s/it] 50%|█████ | 261/520 [16:25<15:44, 3.65s/it] {'loss': 1.3315, 'grad_norm': 0.0022126824517788423, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:25<15:44, 3.65s/it] 50%|█████ | 262/520 [16:29<15:41, 3.65s/it] {'loss': 1.232, 'grad_norm': 0.0021768462386496713, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:29<15:41, 3.65s/it] 51%|█████ | 263/520 [16:32<15:39, 3.66s/it] {'loss': 1.3408, 'grad_norm': 0.0020970917807375342, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:32<15:39, 3.66s/it] 51%|█████ | 264/520 [16:36<15:35, 3.66s/it] {'loss': 1.343, 'grad_norm': 0.0019111657280725832, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:36<15:35, 3.66s/it] 51%|█████ | 265/520 [16:40<15:33, 3.66s/it] {'loss': 1.2468, 'grad_norm': 0.002178378636567238, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:40<15:33, 3.66s/it] 51%|█████ | 266/520 [16:43<15:26, 3.65s/it] {'loss': 1.1016, 'grad_norm': 0.0016708991496038456, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:43<15:26, 3.65s/it] 51%|█████▏ | 267/520 [16:47<15:22, 3.65s/it] {'loss': 1.2443, 'grad_norm': 0.0017486543466030985, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 
[16:47<15:22, 3.65s/it] 52%|█████▏ | 268/520 [16:51<15:18, 3.64s/it] {'loss': 1.471, 'grad_norm': 0.0024106476085211867, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:51<15:18, 3.64s/it] 52%|█████▏ | 269/520 [16:54<15:13, 3.64s/it] {'loss': 1.3591, 'grad_norm': 0.0019393466167111086, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:54<15:13, 3.64s/it] 52%|█████▏ | 270/520 [16:58<15:17, 3.67s/it] {'loss': 1.2556, 'grad_norm': 0.0018129052035644965, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:58<15:17, 3.67s/it] 52%|█████▏ | 271/520 [17:02<15:14, 3.67s/it] {'loss': 1.3416, 'grad_norm': 0.001979152644654532, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:02<15:14, 3.67s/it] 52%|█████▏ | 272/520 [17:05<15:12, 3.68s/it] {'loss': 1.2787, 'grad_norm': 0.00205414126732391, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:05<15:12, 3.68s/it] 52%|█████▎ | 273/520 [17:09<15:09, 3.68s/it] {'loss': 1.439, 'grad_norm': 0.0023568554270327513, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:09<15:09, 3.68s/it] 53%|█████▎ | 274/520 [17:13<15:04, 3.67s/it] {'loss': 1.2983, 'grad_norm': 0.0020423072484252723, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:13<15:04, 3.67s/it] 53%|█████▎ | 275/520 [17:16<14:59, 3.67s/it] {'loss': 1.2486, 'grad_norm': 0.0020362182834693945, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:16<14:59, 3.67s/it] 53%|█████▎ | 276/520 [17:20<14:53, 3.66s/it] {'loss': 1.3271, 'grad_norm': 0.0022768366935737457, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:20<14:53, 3.66s/it] 53%|█████▎ | 277/520 [17:24<14:49, 3.66s/it] {'loss': 1.3995, 'grad_norm': 0.001970059950648353, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:24<14:49, 3.66s/it] 53%|█████▎ | 278/520 [17:27<14:45, 3.66s/it] {'loss': 1.1907, 'grad_norm': 0.0018075584019753635, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:27<14:45, 3.66s/it] 54%|█████▎ | 279/520 [17:31<14:53, 3.71s/it] {'loss': 1.2865, 'grad_norm': 0.00221723878620119, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:31<14:53, 3.71s/it] 54%|█████▍ | 280/520 [17:35<15:04, 3.77s/it] {'loss': 1.256, 'grad_norm': 0.002412690340927973, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:35<15:04, 3.77s/it] 54%|█████▍ | 281/520 [17:39<15:01, 3.77s/it] {'loss': 1.3587, 'grad_norm': 0.0020803615693268814, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:39<15:01, 3.77s/it] 54%|█████▍ | 282/520 [17:42<14:48, 3.73s/it] {'loss': 1.2089, 'grad_norm': 0.0018063701441169502, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:42<14:48, 3.73s/it] 54%|█████▍ | 283/520 [17:46<14:40, 3.72s/it] {'loss': 1.3827, 'grad_norm': 0.0021319096175043597, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:46<14:40, 3.72s/it] 55%|█████▍ | 284/520 [17:50<14:41, 3.73s/it] {'loss': 1.2645, 'grad_norm': 0.002197466577036045, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:50<14:41, 3.73s/it] 55%|█████▍ | 285/520 [17:54<14:45, 3.77s/it] {'loss': 1.2357, 'grad_norm': 0.0020371606455434747, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:54<14:45, 
3.77s/it] 55%|█████▌ | 286/520 [17:58<14:57, 3.84s/it] {'loss': 1.106, 'grad_norm': 0.002066080984932085, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:58<14:57, 3.84s/it] 55%|█████▌ | 287/520 [18:02<15:14, 3.93s/it] {'loss': 1.3519, 'grad_norm': 0.002011586507202702, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:02<15:14, 3.93s/it] 55%|█████▌ | 288/520 [18:06<15:12, 3.93s/it] {'loss': 1.3918, 'grad_norm': 0.0019517462112018066, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:06<15:12, 3.93s/it] 56%|█████▌ | 289/520 [18:10<15:19, 3.98s/it] {'loss': 1.2505, 'grad_norm': 0.0018132218091901407, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:10<15:19, 3.98s/it] 56%|█████▌ | 290/520 [18:14<15:06, 3.94s/it] {'loss': 1.1715, 'grad_norm': 0.0017688042148743404, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:14<15:06, 3.94s/it] 56%|█████▌ | 291/520 [18:18<14:57, 3.92s/it] {'loss': 1.2277, 'grad_norm': 0.0020568592608878047, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:18<14:57, 3.92s/it] 56%|█████▌ | 292/520 [18:22<14:50, 3.91s/it] {'loss': 1.2786, 'grad_norm': 0.0018883635305192077, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:22<14:50, 3.91s/it] 56%|█████▋ | 293/520 [18:25<14:44, 3.90s/it] {'loss': 1.2166, 'grad_norm': 0.0020483886868693886, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:25<14:44, 3.90s/it] 57%|█████▋ | 294/520 [18:29<14:40, 3.90s/it] {'loss': 1.2495, 'grad_norm': 0.0020473291330094716, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:29<14:40, 3.90s/it] 57%|█████▋ | 295/520 [18:33<14:38, 3.90s/it] {'loss': 1.3472, 'grad_norm': 0.002028157307653767, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:33<14:38, 3.90s/it] 57%|█████▋ | 296/520 [18:37<14:31, 3.89s/it] {'loss': 1.1914, 'grad_norm': 0.0019988087743134228, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:37<14:31, 3.89s/it] 57%|█████▋ | 297/520 [18:41<14:25, 3.88s/it] {'loss': 1.3253, 'grad_norm': 0.002183687998900547, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:41<14:25, 3.88s/it] 57%|█████▋ | 298/520 [18:45<14:30, 3.92s/it] {'loss': 1.2877, 'grad_norm': 0.0017372479382809982, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:45<14:30, 3.92s/it] 57%|█████▊ | 299/520 [18:49<14:40, 3.98s/it] {'loss': 1.3428, 'grad_norm': 0.0017898561333558553, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:49<14:40, 3.98s/it] 58%|█████▊ | 300/520 [18:53<14:45, 4.03s/it] {'loss': 1.3418, 'grad_norm': 0.0019487076337904431, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:53<14:45, 4.03s/it] 58%|█████▊ | 301/520 [18:57<14:47, 4.05s/it] {'loss': 1.3129, 'grad_norm': 0.001964686140704943, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:57<14:47, 4.05s/it] 58%|█████▊ | 302/520 [19:01<14:48, 4.07s/it] {'loss': 1.3623, 'grad_norm': 0.0019367015063266406, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:01<14:48, 4.07s/it] 58%|█████▊ | 303/520 [19:06<14:47, 4.09s/it] {'loss': 1.2512, 'grad_norm': 0.0021872651943848024, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 
[19:06<14:47, 4.09s/it] 58%|█████▊ | 304/520 [19:10<14:50, 4.12s/it] {'loss': 1.2588, 'grad_norm': 0.0020302889518171276, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:10<14:50, 4.12s/it] 59%|█████▊ | 305/520 [19:14<14:49, 4.14s/it] {'loss': 1.3585, 'grad_norm': 0.0022264079968031224, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:14<14:49, 4.14s/it] 59%|█████▉ | 306/520 [19:18<14:48, 4.15s/it] {'loss': 1.299, 'grad_norm': 0.0020001068019679484, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:18<14:48, 4.15s/it] 59%|█████▉ | 307/520 [19:23<14:59, 4.22s/it] {'loss': 1.2262, 'grad_norm': 0.0017255658061158178, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:23<14:59, 4.22s/it] 59%|█████▉ | 308/520 [19:26<14:33, 4.12s/it] {'loss': 1.3506, 'grad_norm': 0.0020803259878531934, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:26<14:33, 4.12s/it] 59%|█████▉ | 309/520 [19:30<14:09, 4.03s/it] {'loss': 1.2258, 'grad_norm': 0.0017752905018079052, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:30<14:09, 4.03s/it] 60%|█████▉ | 310/520 [19:34<13:47, 3.94s/it] {'loss': 1.2095, 'grad_norm': 0.0017973923431961474, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:34<13:47, 3.94s/it] 60%|█████▉ | 311/520 [19:38<13:26, 3.86s/it] {'loss': 1.1795, 'grad_norm': 0.001891489593815121, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:38<13:26, 3.86s/it] 60%|██████ | 312/520 [19:41<13:11, 3.80s/it] {'loss': 1.168, 'grad_norm': 0.0020118909646347147, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:41<13:11, 3.80s/it] 60%|██████ | 313/520 [19:45<12:58, 3.76s/it] {'loss': 1.1628, 'grad_norm': 0.0017002669671706287, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:45<12:58, 3.76s/it] 60%|██████ | 314/520 [19:49<13:18, 3.88s/it] {'loss': 1.1971, 'grad_norm': 0.0017331379446996397, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:49<13:18, 3.88s/it] 61%|██████ | 315/520 [19:53<13:02, 3.82s/it] {'loss': 1.3275, 'grad_norm': 0.0024286739094007406, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:53<13:02, 3.82s/it] 61%|██████ | 316/520 [19:57<13:18, 3.91s/it] {'loss': 1.1758, 'grad_norm': 0.0022799186881896382, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:57<13:18, 3.91s/it] 61%|██████ | 317/520 [20:01<13:00, 3.85s/it] {'loss': 1.189, 'grad_norm': 0.0017183078976020151, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:01<13:00, 3.85s/it] 61%|██████ | 318/520 [20:04<12:46, 3.79s/it] {'loss': 1.314, 'grad_norm': 0.00200037636046494, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:04<12:46, 3.79s/it] 61%|██████▏ | 319/520 [20:08<12:56, 3.86s/it] {'loss': 1.1773, 'grad_norm': 0.001916189333213554, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:08<12:56, 3.86s/it] 62%|██████▏ | 320/520 [20:12<12:41, 3.81s/it] {'loss': 1.1196, 'grad_norm': 0.0020383490371939064, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:12<12:41, 3.81s/it] 62%|██████▏ | 321/520 [20:16<12:33, 3.79s/it] {'loss': 1.3213, 'grad_norm': 0.0020203964050047553, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 
[20:16<12:33, 3.79s/it] 62%|██████▏ | 322/520 [20:19<12:26, 3.77s/it] {'loss': 1.2033, 'grad_norm': 0.002056422522384545, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:19<12:26, 3.77s/it] 62%|██████▏ | 323/520 [20:23<12:17, 3.74s/it] {'loss': 1.2656, 'grad_norm': 0.002082060389278118, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:23<12:17, 3.74s/it] 62%|██████▏ | 324/520 [20:27<12:10, 3.73s/it] {'loss': 1.2538, 'grad_norm': 0.0020809974503113253, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:27<12:10, 3.73s/it] 62%|██████▎ | 325/520 [20:31<12:08, 3.74s/it] {'loss': 1.2667, 'grad_norm': 0.0019708965487748053, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:31<12:08, 3.74s/it] 63%|██████▎ | 326/520 [20:34<12:01, 3.72s/it] {'loss': 1.2534, 'grad_norm': 0.0018552145416375675, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:34<12:01, 3.72s/it] 63%|██████▎ | 327/520 [20:38<11:56, 3.71s/it] {'loss': 1.3371, 'grad_norm': 0.0022590247772406222, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:38<11:56, 3.71s/it] 63%|██████▎ | 328/520 [20:42<11:52, 3.71s/it] {'loss': 1.3162, 'grad_norm': 0.0019985184020381197, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:42<11:52, 3.71s/it] 63%|██████▎ | 329/520 [20:45<11:48, 3.71s/it] {'loss': 1.1703, 'grad_norm': 0.0016685589333300683, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:45<11:48, 3.71s/it] 63%|██████▎ | 330/520 [20:49<11:44, 3.71s/it] {'loss': 1.2521, 'grad_norm': 0.0017252206935055266, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:49<11:44, 3.71s/it] 64%|██████▎ | 331/520 [20:53<11:48, 3.75s/it] {'loss': 1.2076, 'grad_norm': 0.0018199590172224597, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:53<11:48, 3.75s/it] 64%|██████▍ | 332/520 [20:57<11:44, 3.75s/it] {'loss': 1.3527, 'grad_norm': 0.0019073596484241286, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:57<11:44, 3.75s/it] 64%|██████▍ | 333/520 [21:00<11:36, 3.73s/it] {'loss': 1.3672, 'grad_norm': 0.0021455010302161978, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:00<11:36, 3.73s/it] 64%|██████▍ | 334/520 [21:04<11:32, 3.73s/it] {'loss': 1.2564, 'grad_norm': 0.002174941809996365, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:04<11:32, 3.73s/it] 64%|██████▍ | 335/520 [21:08<11:29, 3.73s/it] {'loss': 1.2514, 'grad_norm': 0.0016996441651939513, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:08<11:29, 3.73s/it] 65%|██████▍ | 336/520 [21:11<11:23, 3.72s/it] {'loss': 1.142, 'grad_norm': 0.002077639516877407, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:11<11:23, 3.72s/it] 65%|██████▍ | 337/520 [21:15<11:16, 3.70s/it] {'loss': 1.1412, 'grad_norm': 0.0019528980167679805, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:15<11:16, 3.70s/it] 65%|██████▌ | 338/520 [21:19<11:11, 3.69s/it] {'loss': 1.2655, 'grad_norm': 0.0018425380103877281, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:19<11:11, 3.69s/it] 65%|██████▌ | 339/520 [21:22<11:06, 3.68s/it] {'loss': 1.203, 'grad_norm': 0.001866687873845366, 'learning_rate': 
0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:22<11:06, 3.68s/it] 65%|██████▌ | 340/520 [21:26<11:02, 3.68s/it] {'loss': 1.1971, 'grad_norm': 0.001829848749703501, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:26<11:02, 3.68s/it] 66%|██████▌ | 341/520 [21:30<10:57, 3.67s/it] {'loss': 1.2126, 'grad_norm': 0.0019119781431730456, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:30<10:57, 3.67s/it] 66%|██████▌ | 342/520 [21:33<10:54, 3.68s/it] {'loss': 1.3127, 'grad_norm': 0.0022941402203692438, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:34<10:54, 3.68s/it] 66%|██████▌ | 343/520 [21:37<10:51, 3.68s/it] {'loss': 1.2653, 'grad_norm': 0.0018938163170354764, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:37<10:51, 3.68s/it] 66%|██████▌ | 344/520 [21:41<10:47, 3.68s/it] {'loss': 1.1679, 'grad_norm': 0.001916456396642199, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:41<10:47, 3.68s/it] 66%|██████▋ | 345/520 [21:45<10:42, 3.67s/it] {'loss': 1.2903, 'grad_norm': 0.0021434545050638726, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:45<10:42, 3.67s/it] 67%|██████▋ | 346/520 [21:48<10:39, 3.67s/it] {'loss': 1.269, 'grad_norm': 0.0018280968720422106, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:48<10:39, 3.67s/it] 67%|██████▋ | 347/520 [21:52<10:37, 3.68s/it] {'loss': 1.1871, 'grad_norm': 0.0017776703934612769, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:52<10:37, 3.68s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). 
Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:56<10:32, 3.68s/it] {'loss': 1.1443, 'grad_norm': 0.002220646887112996, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:56<10:32, 3.68s/it] 67%|██████▋ | 349/520 [21:59<10:29, 3.68s/it] {'loss': 1.1933, 'grad_norm': 0.0020438081724174764, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:59<10:29, 3.68s/it] 67%|██████▋ | 350/520 [22:03<10:24, 3.68s/it] {'loss': 1.231, 'grad_norm': 0.0020406265844067524, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:03<10:24, 3.68s/it] 68%|██████▊ | 351/520 [22:07<10:21, 3.68s/it] {'loss': 1.1368, 'grad_norm': 0.0018243035810807005, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:07<10:21, 3.68s/it] 68%|██████▊ | 352/520 [22:10<10:19, 3.69s/it] {'loss': 1.259, 'grad_norm': 0.001816800417844988, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:10<10:19, 3.69s/it] 68%|██████▊ | 353/520 [22:14<10:17, 3.70s/it] {'loss': 1.2151, 'grad_norm': 0.0016637350592837584, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:14<10:17, 3.70s/it] 68%|██████▊ | 354/520 [22:18<10:14, 3.70s/it] {'loss': 1.3544, 'grad_norm': 0.0018917624611430636, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:18<10:14, 3.70s/it] 68%|██████▊ | 355/520 [22:21<10:12, 3.71s/it] {'loss': 1.1969, 'grad_norm': 0.0018979698236740433, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:21<10:12, 3.71s/it] 68%|██████▊ | 356/520 [22:25<10:07, 3.70s/it] {'loss': 1.1924, 'grad_norm': 0.0018983561143635936, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:25<10:07, 3.70s/it] 69%|██████▊ | 357/520 [22:29<10:02, 3.70s/it] {'loss': 1.222, 'grad_norm': 0.0017327463646312912, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:29<10:02, 3.70s/it] 69%|██████▉ | 358/520 [22:33<09:58, 3.70s/it] {'loss': 1.1548, 'grad_norm': 0.001877920170380068, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:33<09:58, 3.70s/it] 69%|██████▉ | 359/520 [22:36<09:53, 3.69s/it] {'loss': 1.2826, 'grad_norm': 0.001990078084395745, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:36<09:53, 3.69s/it] 69%|██████▉ | 360/520 [22:40<09:51, 3.70s/it] {'loss': 1.3106, 'grad_norm': 0.0029929720138892274, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:40<09:51, 3.70s/it] 69%|██████▉ | 361/520 [22:44<09:58, 3.76s/it] {'loss': 1.288, 'grad_norm': 0.0017400863380176229, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:44<09:58, 3.76s/it] 70%|██████▉ | 362/520 [22:48<09:53, 3.75s/it] {'loss': 1.2186, 'grad_norm': 0.00199765748073601, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:48<09:53, 3.75s/it] 70%|██████▉ | 363/520 [22:51<09:46, 3.73s/it] {'loss': 1.2343, 'grad_norm': 0.001791657814771769, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:51<09:46, 3.73s/it] 70%|███████ | 364/520 [22:55<09:42, 3.74s/it] {'loss': 1.3105, 'grad_norm': 0.001847077396301187, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:55<09:42, 3.74s/it] 70%|███████ | 365/520 [22:59<09:36, 3.72s/it] {'loss': 1.3044, 'grad_norm': 
0.001980580660005314, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:59<09:36, 3.72s/it] 70%|███████ | 366/520 [23:02<09:31, 3.71s/it] {'loss': 1.252, 'grad_norm': 0.0017537544456580681, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:02<09:31, 3.71s/it] 71%|███████ | 367/520 [23:06<09:28, 3.72s/it] {'loss': 1.2479, 'grad_norm': 0.0017965972369351473, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:06<09:28, 3.72s/it] 71%|███████ | 368/520 [23:10<09:24, 3.71s/it] {'loss': 1.1058, 'grad_norm': 0.0019018212475663912, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:10<09:24, 3.71s/it] 71%|███████ | 369/520 [23:14<09:20, 3.71s/it] {'loss': 1.26, 'grad_norm': 0.0017512287180410536, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:14<09:20, 3.71s/it] 71%|███████ | 370/520 [23:17<09:16, 3.71s/it] {'loss': 1.1614, 'grad_norm': 0.0017554875903239388, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:17<09:16, 3.71s/it] 71%|███████▏ | 371/520 [23:21<09:12, 3.71s/it] {'loss': 1.1598, 'grad_norm': 0.001926337510239174, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:21<09:12, 3.71s/it] 72%|███████▏ | 372/520 [23:25<09:09, 3.71s/it] {'loss': 1.3666, 'grad_norm': 0.0021354935317963192, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:25<09:09, 3.71s/it] 72%|███████▏ | 373/520 [23:29<09:12, 3.76s/it] {'loss': 1.2509, 'grad_norm': 0.002849902953385812, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:29<09:12, 3.76s/it] 72%|███████▏ | 374/520 [23:32<09:11, 3.78s/it] {'loss': 1.2447, 'grad_norm': 0.0017982434022759924, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:32<09:11, 3.78s/it] 72%|███████▏ | 375/520 [23:36<09:11, 3.80s/it] {'loss': 1.1615, 'grad_norm': 0.001903270292931079, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:36<09:11, 3.80s/it] 72%|███████▏ | 376/520 [23:40<09:10, 3.83s/it] {'loss': 1.2779, 'grad_norm': 0.0017265231697567412, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:40<09:10, 3.83s/it] 72%|███████▎ | 377/520 [23:44<09:09, 3.84s/it] {'loss': 1.2164, 'grad_norm': 0.001892396627552514, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:44<09:09, 3.84s/it] 73%|███████▎ | 378/520 [23:48<09:05, 3.84s/it] {'loss': 1.2671, 'grad_norm': 0.0017697159738996996, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:48<09:05, 3.84s/it] 73%|███████▎ | 379/520 [23:52<09:02, 3.85s/it] {'loss': 1.2478, 'grad_norm': 0.0017525950428331804, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:52<09:02, 3.85s/it] 73%|███████▎ | 380/520 [23:55<08:57, 3.84s/it] {'loss': 1.3332, 'grad_norm': 0.002120123629399306, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:55<08:57, 3.84s/it] 73%|███████▎ | 381/520 [23:59<08:54, 3.85s/it] {'loss': 1.25, 'grad_norm': 0.0018789724484805164, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:59<08:54, 3.85s/it] 73%|███████▎ | 382/520 [24:03<08:53, 3.86s/it] {'loss': 1.2771, 'grad_norm': 0.0018553227256413122, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:03<08:53, 3.86s/it] 74%|███████▎ 
| 383/520 [24:07<08:49, 3.86s/it] {'loss': 1.0863, 'grad_norm': 0.0019328609533862976, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:07<08:49, 3.86s/it] 74%|███████▍ | 384/520 [24:11<08:46, 3.87s/it] {'loss': 1.3552, 'grad_norm': 0.001799439408127434, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:11<08:46, 3.87s/it] 74%|███████▍ | 385/520 [24:15<08:41, 3.86s/it] {'loss': 1.2223, 'grad_norm': 0.00168137272041478, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:15<08:41, 3.86s/it] 74%|███████▍ | 386/520 [24:19<08:37, 3.86s/it] {'loss': 1.1713, 'grad_norm': 0.0015830119430872186, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:19<08:37, 3.86s/it] 74%|███████▍ | 387/520 [24:23<08:32, 3.85s/it] {'loss': 1.3536, 'grad_norm': 0.0018206230918453262, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:23<08:32, 3.85s/it] 75%|███████▍ | 388/520 [24:26<08:29, 3.86s/it] {'loss': 1.1161, 'grad_norm': 0.0016966332701900807, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:26<08:29, 3.86s/it] 75%|███████▍ | 389/520 [24:30<08:27, 3.87s/it] {'loss': 1.179, 'grad_norm': 0.0020744360238883746, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:30<08:27, 3.87s/it] 75%|███████▌ | 390/520 [24:34<08:24, 3.88s/it] {'loss': 1.2457, 'grad_norm': 0.001770009245447004, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:34<08:24, 3.88s/it] 75%|███████▌ | 391/520 [24:38<08:21, 3.89s/it] {'loss': 1.3222, 'grad_norm': 0.0018495713754561453, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:38<08:21, 3.89s/it] 75%|███████▌ | 392/520 [24:42<08:16, 3.88s/it] {'loss': 1.127, 'grad_norm': 0.001747039029875237, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:42<08:16, 3.88s/it] 76%|███████▌ | 393/520 [24:46<08:11, 3.87s/it] {'loss': 1.1658, 'grad_norm': 0.001680467003945309, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:46<08:11, 3.87s/it] 76%|███████▌ | 394/520 [24:50<08:05, 3.86s/it] {'loss': 1.191, 'grad_norm': 0.0019138457369574102, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:50<08:05, 3.86s/it] 76%|███████▌ | 395/520 [24:54<08:02, 3.86s/it] {'loss': 1.158, 'grad_norm': 0.002057870648287025, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:54<08:02, 3.86s/it] 76%|███████▌ | 396/520 [24:57<07:58, 3.86s/it] {'loss': 1.2462, 'grad_norm': 0.001904932664449011, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:57<07:58, 3.86s/it] 76%|███████▋ | 397/520 [25:01<07:54, 3.85s/it] {'loss': 1.2265, 'grad_norm': 0.0017530174069754626, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:01<07:54, 3.85s/it] 77%|███████▋ | 398/520 [25:05<07:49, 3.85s/it] {'loss': 1.2272, 'grad_norm': 0.0018848919632219387, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:05<07:49, 3.85s/it] 77%|███████▋ | 399/520 [25:09<07:46, 3.86s/it] {'loss': 1.2119, 'grad_norm': 0.0018008147285720516, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:09<07:46, 3.86s/it] 77%|███████▋ | 400/520 [25:13<07:43, 3.87s/it] {'loss': 1.2697, 'grad_norm': 0.0023325457616486145, 'learning_rate': 
0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:13<07:43, 3.87s/it] 77%|███████▋ | 401/520 [25:17<07:38, 3.86s/it] {'loss': 1.0461, 'grad_norm': 0.001983415900805257, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:17<07:38, 3.86s/it] 77%|███████▋ | 402/520 [25:20<07:28, 3.80s/it] {'loss': 1.1681, 'grad_norm': 0.001911526431035437, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:20<07:28, 3.80s/it] 78%|███████▊ | 403/520 [25:24<07:20, 3.76s/it] {'loss': 1.2025, 'grad_norm': 0.002020132656457429, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:24<07:20, 3.76s/it] 78%|███████▊ | 404/520 [25:28<07:13, 3.73s/it] {'loss': 1.106, 'grad_norm': 0.0022349436359462137, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:28<07:13, 3.73s/it] 78%|███████▊ | 405/520 [25:31<07:09, 3.73s/it] {'loss': 1.2178, 'grad_norm': 0.0018014225635608287, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:31<07:09, 3.73s/it] 78%|███████▊ | 406/520 [25:35<07:02, 3.71s/it] {'loss': 1.1478, 'grad_norm': 0.002156267327458803, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:35<07:02, 3.71s/it] 78%|███████▊ | 407/520 [25:39<06:58, 3.70s/it] {'loss': 1.2923, 'grad_norm': 0.0018631642647773618, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:39<06:58, 3.70s/it] 78%|███████▊ | 408/520 [25:42<06:52, 3.68s/it] {'loss': 1.1825, 'grad_norm': 0.0019396892626500565, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:42<06:52, 3.68s/it] 79%|███████▊ | 409/520 [25:46<06:48, 3.68s/it] {'loss': 1.313, 'grad_norm': 0.00206222710827162, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:46<06:48, 3.68s/it] 79%|███████▉ | 410/520 [25:50<06:44, 3.68s/it] {'loss': 1.0326, 'grad_norm': 0.001816373948568347, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:50<06:44, 3.68s/it] 79%|███████▉ | 411/520 [25:53<06:40, 3.67s/it] {'loss': 1.2919, 'grad_norm': 0.0022776011285913214, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:53<06:40, 3.67s/it] 79%|███████▉ | 412/520 [25:57<06:35, 3.66s/it] {'loss': 1.2004, 'grad_norm': 0.001849831307949254, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:57<06:35, 3.66s/it] 79%|███████▉ | 413/520 [26:01<06:32, 3.67s/it] {'loss': 1.2402, 'grad_norm': 0.0017583380491095557, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:01<06:32, 3.67s/it] 80%|███████▉ | 414/520 [26:04<06:29, 3.68s/it] {'loss': 1.0438, 'grad_norm': 0.0016075251932963562, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:04<06:29, 3.68s/it] 80%|███████▉ | 415/520 [26:08<06:27, 3.69s/it] {'loss': 1.1744, 'grad_norm': 0.0017368777721246849, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:08<06:27, 3.69s/it] 80%|████████ | 416/520 [26:12<06:23, 3.69s/it] {'loss': 1.0995, 'grad_norm': 0.0021432104795714294, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:12<06:23, 3.69s/it] 80%|████████ | 417/520 [26:15<06:18, 3.68s/it] {'loss': 1.259, 'grad_norm': 0.0020543969896700985, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:15<06:18, 3.68s/it] 80%|████████ | 418/520 [26:19<06:15, 
3.68s/it] {'loss': 1.2458, 'grad_norm': 0.0018169381458742835, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:19<06:15, 3.68s/it] 81%|████████ | 419/520 [26:23<06:10, 3.67s/it] {'loss': 1.2298, 'grad_norm': 0.0019612166605817013, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:23<06:10, 3.67s/it] 81%|████████ | 420/520 [26:26<06:06, 3.66s/it] {'loss': 1.1225, 'grad_norm': 0.00207205975272304, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:26<06:06, 3.66s/it] 81%|████████ | 421/520 [26:30<06:02, 3.66s/it] {'loss': 1.0526, 'grad_norm': 0.0021271202591962263, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:30<06:02, 3.66s/it] 81%|████████ | 422/520 [26:34<05:59, 3.67s/it] {'loss': 1.1762, 'grad_norm': 0.0019213722292616405, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:34<05:59, 3.67s/it] 81%|████████▏ | 423/520 [26:37<05:56, 3.67s/it] {'loss': 1.1639, 'grad_norm': 0.002082811852206698, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:37<05:56, 3.67s/it] 82%|████████▏ | 424/520 [26:41<05:52, 3.67s/it] {'loss': 1.3264, 'grad_norm': 0.001951666182756888, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:41<05:52, 3.67s/it] 82%|████████▏ | 425/520 [26:45<05:47, 3.66s/it] {'loss': 1.1679, 'grad_norm': 0.00176204993355952, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:45<05:47, 3.66s/it] 82%|████████▏ | 426/520 [26:48<05:43, 3.65s/it] {'loss': 1.1918, 'grad_norm': 0.0024250988173721818, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:48<05:43, 3.65s/it] 82%|████████▏ | 427/520 [26:52<05:39, 3.65s/it] {'loss': 1.1047, 'grad_norm': 0.0017630364512830011, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:52<05:39, 3.65s/it] 82%|████████▏ | 428/520 [26:56<05:35, 3.65s/it] {'loss': 1.0839, 'grad_norm': 0.001883296428901718, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:56<05:35, 3.65s/it] 82%|████████▎ | 429/520 [26:59<05:32, 3.66s/it] {'loss': 1.1818, 'grad_norm': 0.0017732440641578188, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:59<05:32, 3.66s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
+ 83%|████████▎ | 430/520 [27:03<05:29, 3.66s/it] {'loss': 1.1817, 'grad_norm': 0.0016964904755185743, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 83%|████████▎ | 431/520 [27:07<05:26, 3.67s/it] {'loss': 1.207, 'grad_norm': 0.0019136927290935083, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 83%|████████▎ | 432/520 [27:10<05:23, 3.67s/it] {'loss': 1.0944, 'grad_norm': 0.0019367606881003996, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 83%|████████▎ | 433/520 [27:14<05:19, 3.67s/it] {'loss': 1.2235, 'grad_norm': 0.001732219213818794, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 83%|████████▎ | 434/520 [27:18<05:15, 3.67s/it] {'loss': 0.9695, 'grad_norm': 0.0018297055500903342, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 84%|████████▎ | 435/520 [27:21<05:11, 3.66s/it] {'loss': 1.2698, 'grad_norm': 0.0024757422966437343, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 84%|████████▍ | 436/520 [27:25<05:07, 3.66s/it] {'loss': 1.0545, 'grad_norm': 0.00186298188453351, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 84%|████████▍ | 437/520 [27:29<05:04, 3.67s/it] {'loss': 1.2838, 'grad_norm': 0.001837752277876827, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 84%|████████▍ | 438/520 [27:32<05:00, 3.67s/it] {'loss': 1.0924, 'grad_norm': 0.0017528715508755904, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 84%|████████▍ | 439/520 [27:36<04:57, 3.67s/it] {'loss': 1.1845, 'grad_norm': 0.0015150006344515398, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 85%|████████▍ | 440/520 [27:40<04:53, 3.67s/it] {'loss': 1.1395, 'grad_norm': 0.0018205810084406238, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 85%|████████▍ | 441/520 [27:43<04:50, 3.67s/it] {'loss': 1.2065, 'grad_norm': 0.001722028406680507, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 85%|████████▌ | 442/520 [27:47<04:46, 3.68s/it] {'loss': 1.1999, 'grad_norm': 0.002032370064221926, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 85%|████████▌ | 443/520 [27:51<04:43, 3.68s/it] {'loss': 1.2169, 'grad_norm': 0.0018150650238418404, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 85%|████████▌ | 444/520 [27:54<04:40, 3.69s/it] {'loss': 1.1804, 'grad_norm': 0.0016664571369711292, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 86%|████████▌ | 445/520 [27:58<04:36, 3.68s/it] {'loss': 1.1004, 'grad_norm': 0.001790094515999101, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 86%|████████▌ | 446/520 [28:02<04:32, 3.68s/it] {'loss': 1.2798, 'grad_norm': 0.0017182597771204162, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 86%|████████▌ | 447/520 [28:06<04:28, 3.68s/it] {'loss': 1.1932, 'grad_norm': 0.001826820074783672, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 86%|████████▌ | 448/520 [28:09<04:26, 3.70s/it] {'loss': 1.1709, 'grad_norm': 0.0018287903589615092, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 86%|████████▋ | 449/520 [28:13<04:22, 3.69s/it] {'loss': 1.2424, 'grad_norm': 0.0019774773123503167, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 87%|████████▋ | 450/520 [28:17<04:18, 3.69s/it] {'loss': 1.2101, 'grad_norm': 0.0018455429036620386, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 87%|████████▋ | 451/520 [28:20<04:14, 3.69s/it] {'loss': 1.2016, 'grad_norm': 0.0018632043457754814, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 87%|████████▋ | 452/520 [28:24<04:10, 3.68s/it] {'loss': 1.2753, 'grad_norm': 0.0017252020733517425, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 87%|████████▋ | 453/520 [28:28<04:06, 3.69s/it] {'loss': 1.2558, 'grad_norm': 0.0017843842369118289, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 87%|████████▋ | 454/520 [28:31<04:02, 3.68s/it] {'loss': 1.1142, 'grad_norm': 0.0019089437456828514, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 88%|████████▊ | 455/520 [28:35<03:59, 3.69s/it] {'loss': 1.2516, 'grad_norm': 0.001836693173365124, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 88%|████████▊ | 456/520 [28:39<03:56, 3.69s/it] {'loss': 1.1709, 'grad_norm': 0.0018926372330679267, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 88%|████████▊ | 457/520 [28:42<03:52, 3.69s/it] {'loss': 1.1938, 'grad_norm': 0.0016630728695835764, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 88%|████████▊ | 458/520 [28:46<03:48, 3.68s/it] {'loss': 1.3138, 'grad_norm': 0.0019381019140177111, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 88%|████████▊ | 459/520 [28:50<03:45, 3.70s/it] {'loss': 1.2462, 'grad_norm': 0.0018203985734779925, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 88%|████████▊ | 460/520 [28:53<03:41, 3.69s/it] {'loss': 1.1242, 'grad_norm': 0.0018831616628623466, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 89%|████████▊ | 461/520 [28:57<03:37, 3.69s/it] {'loss': 1.2772, 'grad_norm': 0.0015163730239975868, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 89%|████████▉ | 462/520 [29:01<03:33, 3.68s/it] {'loss': 1.3278, 'grad_norm': 0.0018787291495537771, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 89%|████████▉ | 463/520 [29:05<03:29, 3.68s/it] {'loss': 1.0801, 'grad_norm': 0.0018901806736573296, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 89%|████████▉ | 464/520 [29:08<03:26, 3.68s/it] {'loss': 1.2262, 'grad_norm': 0.0018686013787948777, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 89%|████████▉ | 465/520 [29:12<03:22, 3.69s/it] {'loss': 1.3394, 'grad_norm': 0.0020467379755946358, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 90%|████████▉ | 466/520 [29:16<03:18, 3.68s/it] {'loss': 1.206, 'grad_norm': 0.0016497130412913969, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 90%|████████▉ | 467/520 [29:19<03:14, 3.67s/it] {'loss': 1.2167, 'grad_norm': 0.0017912969373659222, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 90%|█████████ | 468/520 [29:23<03:10, 3.66s/it] {'loss': 1.1864, 'grad_norm': 0.0021249416346705683, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 90%|█████████ | 469/520 [29:26<03:06, 3.65s/it] {'loss': 1.2452, 'grad_norm': 0.001954142174180751, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 90%|█████████ | 470/520 [29:30<03:02, 3.65s/it] {'loss': 1.1218, 'grad_norm': 0.001666207628661347, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 91%|█████████ | 471/520 [29:34<02:58, 3.64s/it] {'loss': 1.147, 'grad_norm': 0.0019365461480029987, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 91%|█████████ | 472/520 [29:37<02:55, 3.66s/it] {'loss': 1.1141, 'grad_norm': 0.0018533066277213728, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 91%|█████████ | 473/520 [29:41<02:52, 3.67s/it] {'loss': 1.1763, 'grad_norm': 0.0018066381910583557, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 91%|█████████ | 474/520 [29:45<02:48, 3.66s/it] {'loss': 1.244, 'grad_norm': 0.0017179956941735, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 91%|█████████▏| 475/520 [29:48<02:45, 3.67s/it] {'loss': 1.1641, 'grad_norm': 0.001753488605330174, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 92%|█████████▏| 476/520 [29:52<02:41, 3.66s/it] {'loss': 1.1684, 'grad_norm': 0.0018959106666408595, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 92%|█████████▏| 477/520 [29:56<02:37, 3.66s/it] {'loss': 1.1508, 'grad_norm': 0.0020585344156542605, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 92%|█████████▏| 478/520 [29:59<02:33, 3.66s/it] {'loss': 1.1148, 'grad_norm': 0.001776738473308476, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 92%|█████████▏| 479/520 [30:03<02:30, 3.66s/it] {'loss': 1.2233, 'grad_norm': 0.0018874322453897026, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 92%|█████████▏| 480/520 [30:07<02:26, 3.66s/it] {'loss': 1.2414, 'grad_norm': 0.0016999354626680728, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 92%|█████████▎| 481/520 [30:10<02:23, 3.67s/it] {'loss': 1.2491, 'grad_norm': 0.0017087909263610991, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 93%|█████████▎| 482/520 [30:14<02:19, 3.68s/it] {'loss': 1.2473, 'grad_norm': 0.0018728563971540744, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 93%|█████████▎| 483/520 [30:18<02:15, 3.67s/it] {'loss': 1.1825, 'grad_norm': 0.0019137263424817675, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 93%|█████████▎| 484/520 [30:21<02:12, 3.67s/it] {'loss': 1.1907, 'grad_norm': 0.0018472800815134188, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 93%|█████████▎| 485/520 [30:25<02:08, 3.67s/it] {'loss': 1.1369, 'grad_norm': 0.0017623131564040787, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 93%|█████████▎| 486/520 [30:29<02:05, 3.70s/it] {'loss': 1.2644, 'grad_norm': 0.0018990117690755146, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 94%|█████████▎| 487/520 [30:33<02:02, 3.70s/it] {'loss': 1.1094, 'grad_norm': 0.0017392467350722373, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 94%|█████████▍| 488/520 [30:36<01:58, 3.69s/it] {'loss': 1.0562, 'grad_norm': 0.001891136727460155, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 94%|█████████▍| 489/520 [30:40<01:54, 3.69s/it] {'loss': 1.2392, 'grad_norm': 0.001830734605401255, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 94%|█████████▍| 490/520 [30:44<01:50, 3.68s/it] {'loss': 1.1798, 'grad_norm': 0.0019483830375175322, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 94%|█████████▍| 491/520 [30:47<01:46, 3.67s/it] {'loss': 1.1422, 'grad_norm': 0.0018975266835044398, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 95%|█████████▍| 492/520 [30:51<01:42, 3.68s/it] {'loss': 1.261, 'grad_norm': 0.0019701238589572225, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 95%|█████████▍| 493/520 [30:55<01:39, 3.67s/it] {'loss': 1.2926, 'grad_norm': 0.0019866987192857033, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 95%|█████████▌| 494/520 [30:58<01:35, 3.67s/it] {'loss': 1.2004, 'grad_norm': 0.0017515801276175328, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 95%|█████████▌| 495/520 [31:02<01:31, 3.66s/it] {'loss': 1.1562, 'grad_norm': 0.0018105656060316824, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 95%|█████████▌| 496/520 [31:06<01:27, 3.66s/it] {'loss': 1.076, 'grad_norm': 0.0019833644824976972, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 96%|█████████▌| 497/520 [31:09<01:24, 3.67s/it] {'loss': 1.1847, 'grad_norm': 0.0016098334918533633, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 96%|█████████▌| 498/520 [31:13<01:20, 3.66s/it] {'loss': 1.1546, 'grad_norm': 0.0018513345586392577, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 96%|█████████▌| 499/520 [31:17<01:17, 3.68s/it] {'loss': 1.317, 'grad_norm': 0.0018362452779619586, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 96%|█████████▌| 500/520 [31:20<01:13, 3.68s/it] {'loss': 1.2789, 'grad_norm': 0.0021759346088882727, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 96%|█████████▋| 501/520 [31:24<01:09, 3.68s/it] {'loss': 1.2355, 'grad_norm': 0.002130365273134262, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 97%|█████████▋| 502/520 [31:28<01:06, 3.70s/it] {'loss': 1.1952, 'grad_norm': 0.001727256045245562, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 97%|█████████▋| 503/520 [31:32<01:03, 3.74s/it] {'loss': 1.2059, 'grad_norm': 0.001835803705978637, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 97%|█████████▋| 504/520 [31:35<01:00, 3.77s/it] {'loss': 1.195, 'grad_norm': 0.00212988558138824, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 97%|█████████▋| 505/520 [31:39<00:57, 3.81s/it] {'loss': 1.229, 'grad_norm': 0.0018682966532791087, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 97%|█████████▋| 506/520 [31:43<00:53, 3.81s/it] {'loss': 1.1468, 'grad_norm': 0.0019213410676648653, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 98%|█████████▊| 507/520 [31:47<00:49, 3.80s/it] {'loss': 1.3609, 'grad_norm': 0.0017774866600162337, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 98%|█████████▊| 508/520 [31:51<00:45, 3.76s/it] {'loss': 1.2656, 'grad_norm': 0.001815517597287324, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 98%|█████████▊| 509/520 [31:54<00:41, 3.73s/it] {'loss': 1.2353, 'grad_norm': 0.0017480383089733481, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 98%|█████████▊| 510/520 [31:58<00:37, 3.71s/it] {'loss': 1.1909, 'grad_norm': 0.0017855829438749242, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 98%|█████████▊| 511/520 [32:02<00:33, 3.69s/it] {'loss': 1.1598, 'grad_norm': 0.001717201741510688, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 98%|█████████▊| 512/520 [32:05<00:29, 3.68s/it] {'loss': 1.0443, 'grad_norm': 0.0018129473212823725, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 99%|█████████▊| 513/520 [32:09<00:25, 3.70s/it] {'loss': 1.247, 'grad_norm': 0.0019965691429435664, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 99%|█████████▉| 514/520 [32:13<00:22, 3.69s/it] {'loss': 1.2183, 'grad_norm': 0.001737038599118599, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 99%|█████████▉| 515/520 [32:16<00:18, 3.68s/it] {'loss': 1.27, 'grad_norm': 0.0021153952052175742, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:20<00:14, 3.68s/it] {'loss': 1.1643, 'grad_norm': 0.0018057682118342402, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:24<00:10, 3.66s/it] {'loss': 1.277, 'grad_norm': 0.001976676679407641, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 100%|█████████▉| 518/520 [32:27<00:07, 3.64s/it] {'loss': 1.1857, 'grad_norm': 0.0019492945530571655, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 100%|█████████▉| 519/520 [32:31<00:03, 3.64s/it] {'loss': 1.2134, 'grad_norm': 0.001801676276259821, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:35<00:00, 3.90s/it] {'loss': 1.2588, 'grad_norm': 0.0018133361064319262, 'learning_rate': 0.0, 'epoch': 1.0}
+ {'train_runtime': 1955.7983, 'train_samples_per_second': 34.016, 'train_steps_per_second': 0.266, 'train_loss': 1.3176314881214728, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:35<00:00, 3.76s/it]
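[editor note] The summary line is internally consistent with the batch configuration, assuming the same flags as the launcher command recorded below for the follow-up run (4 per-device batch x 4 gradient-accumulation steps x 8 ranks). A quick consistency check:

# Back-of-envelope check of the trainer summary above (values from the log;
# batch flags assumed from the launcher command recorded below).
per_device, grad_accum, world = 4, 4, 8
effective_batch = per_device * grad_accum * world        # 128
runtime_s, samples_per_s = 1955.7983, 34.016

num_samples = samples_per_s * runtime_s                  # ~66,528, i.e. 0.1 of the 665k mix
steps = -(-num_samples // effective_batch)               # ceil -> 520, the logged step count
print(effective_batch, round(num_samples), steps)
print(steps / runtime_s)                                 # ~0.266 steps/s, matching the log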
+[2025-10-13 09:49:47,054] [INFO] [launch.py:348:main] Process 614877 exits successfully.
+[2025-10-13 09:49:48,056] [INFO] [launch.py:348:main] Process 614884 exits successfully.
+[2025-10-13 09:49:48,056] [INFO] [launch.py:348:main] Process 614878 exits successfully.
+[2025-10-13 09:49:48,057] [INFO] [launch.py:348:main] Process 614882 exits successfully.
+[2025-10-13 09:49:48,057] [INFO] [launch.py:348:main] Process 614874 exits successfully.
+[2025-10-13 09:49:48,057] [INFO] [launch.py:348:main] Process 614883 exits successfully.
+[2025-10-13 09:49:49,059] [INFO] [launch.py:348:main] Process 614879 exits successfully.
+[2025-10-13 09:49:52,063] [INFO] [launch.py:348:main] Process 614873 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.7_2e-1_connector-3.0_1.7_2e-1_ablation_20251013_080601.log
+Timestamp: 2025-10-13 09:49:54
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation_20251013_104850.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation_20251013_104850.log
new file mode 100644
index 0000000000000000000000000000000000000000..ce5d17b7e9ff87a07809f7c424fbb52218c6dc35
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation_20251013_104850.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation_20251013_104850.log
+Timestamp: 2025-10-13 10:48:50
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 10:48:52,879] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 10:48:56,456] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 10:48:56,457] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 1.9 --temperature_mlp_text 1.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 1.9 --temperature_mlp_vision 1.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 1.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
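[editor note] The `--world_info` blob in the runner command is base64-encoded JSON mapping each host to its GPU slots; decoding it shows the eight local ranks that the launcher subsequently spawns:

import base64, json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}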
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 10:48:59,040] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 10:49:00,072] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 10:49:00,072] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 10:49:00,072] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 10:49:00,072] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 10:49:00,072] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 10:49:00,072] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 10:49:00,072] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 10:49:00,075] [INFO] [launch.py:253:main] process 694225 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 10:49:07,112] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 10:49:07,754] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 10:49:07,754] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.9, 'temperature_mlp': 1.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.9, 'mask_type': 'soft', 'backward_type': 'normal'}}
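[editor note] The dict above is the mask configuration actually applied: soft masks (`mask_type: soft`) at temperature 1.9 over the LLM and connector, with a plain backward pass (`backward_type: normal`). The layer implementation itself is not printed in the log; a hypothetical minimal sketch consistent with these settings, assuming the soft mask is a temperature-scaled sigmoid over learnable per-weight scores and that `--init_mean 3.0` is the score initialization:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    # Hypothetical sketch, not the repository's SupermaskLinear code:
    # mask = sigmoid(scores / temperature), applied multiplicatively.
    def __init__(self, in_features, out_features, bias=True,
                 temperature=1.9, init_mean=3.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One learnable score per weight; init_mean=3.0 starts masks near 1,
        # so training begins close to the dense pretrained network.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)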
+TinyLlavaConfig {
+ "backward_type_connector": "normal",
+ "cache_dir": null,
+ "connector_type": "mlp2x_gelu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "square",
+ "image_token_index": -200,
+ "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "mask_model": [
+ "llm",
+ "connector"
+ ],
+ "mask_type_connector": "soft",
+ "model_type": "tinyllava",
+ "num_queries": 128,
+ "num_resampler_layers": 3,
+ "pad_token": null,
+ "resampler_hidden_size": 768,
+ "sparsity_connector": null,
+ "subnet_type_connector": "global",
+ "temperature_connector": 1.9,
+ "text_config": {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "backward_type": "normal",
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_size": 896,
+ "intermediate_size": 4864,
+ "mask_type": "soft",
+ "masked_layers": "all",
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "subnet_mode": "both",
+ "subnet_type": "None",
+ "temperature_attn": 1.9,
+ "temperature_mlp": 1.9,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "threshold_connector": null,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "tokenizer_padding_side": "right",
+ "tokenizer_use_fast": false,
+ "transformers_version": "4.40.1",
+ "tune_type_connector": "frozen",
+ "tune_type_llm": "frozen",
+ "tune_type_vision_tower": "frozen",
+ "tune_vision_tower_from_layer": -1,
+ "use_cache": false,
+ "vision_config": {
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 384,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_name_or_path": "google/siglip-so400m-patch14-384",
+ "model_name_or_path2": "",
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "patch",
+ "vision_hidden_size": 1152,
+ "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+ "vision_model_name_or_path2": "",
+ "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:694225:694225 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694225:694225 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694225:694225 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694225:694225 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694225:694225 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:694225:694225 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:694231:694231 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694231:694231 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694231:694231 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694231:694231 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694231:694231 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694231:694231 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:694230:694230 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694230:694230 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694230:694230 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694230:694230 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694230:694230 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694230:694230 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:694228:694228 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694228:694228 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694228:694228 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694228:694228 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694228:694228 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694228:694228 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:694227:694227 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694227:694227 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694227:694227 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694232:694232 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694232:694232 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694227:694227 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694232:694232 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694227:694227 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694227:694227 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:694232:694232 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694232:694232 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694232:694232 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:694226:694226 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694226:694226 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694229:694229 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:694226:694226 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694229:694229 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694229:694229 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694226:694226 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694226:694226 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694226:694226 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:694229:694229 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:694229:694229 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:694229:694229 [4] NCCL INFO NET/Plugin: Using internal network plugin. 
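All eight ranks report the same bootstrap path: NCCL_SOCKET_IFNAME=eth pins the bootstrap (and, since no InfiniBand devices or external net plugin are found, the data path as well) to eth0 at 10.200.136.19, falling back to the internal socket transport. A minimal sketch of reproducing this environment around a torch.distributed launch, assuming one process per GPU under torchrun --nproc_per_node=8 (the env values are copied from the log; the script itself is illustrative):

```python
import os
import torch
import torch.distributed as dist

# Must be set before the first NCCL communicator is created.
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")  # bind to eth* interfaces
os.environ.setdefault("NCCL_DEBUG", "INFO")         # emits lines like the above

dist.init_process_group(backend="nccl")  # MASTER_ADDR/PORT come from torchrun
rank = dist.get_rank()
torch.cuda.set_device(rank % torch.cuda.device_count())
print(f"rank {rank}/{dist.get_world_size()} ready")
dist.destroy_process_group()
```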
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO ncclCommInitRank comm 0x55dc57004ae0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO ncclCommInitRank comm 0x557ecf725c00 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO ncclCommInitRank comm 0x559567b3aec0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO ncclCommInitRank comm 0x5564a0f0a5f0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO ncclCommInitRank comm 0x55865e24d360 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO ncclCommInitRank comm 0x556b40162c80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO ncclCommInitRank comm 0x55742a9b83c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO ncclCommInitRank comm 0x55a9e9edde10 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc413a18f09886877 - Init START +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Setting affinity for GPU 
2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO comm 0x55865e24d360 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO comm 0x556b40162c80 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO comm 0x55742a9b83c0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO comm 0x5564a0f0a5f0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO comm 0x55a9e9edde10 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO comm 0x557ecf725c00 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO comm 0x55dc57004ae0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO comm 0x559567b3aec0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 
[10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
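Once "Connected all rings" appears for every rank, all 24 channels share the same single-node ring order 0 1 2 3 4 5 6 7 (with the 7[7] -> 0[0] wraparound) carried over P2P/CUMEM; the remaining lines wire the reverse, tree-direction links such as 7[7] -> 6[6]. A throwaway sketch of the neighbor arithmetic behind those channel lines (illustrative only):

```python
# Single-node ring over 8 GPUs, as in the channel lines above.
WORLD_SIZE = 8
for rank in range(WORLD_SIZE):
    send_to = (rank + 1) % WORLD_SIZE    # 7 wraps around to 0
    recv_from = (rank - 1) % WORLD_SIZE  # 0 receives from 7
    print(f"rank {rank}: ring send -> {send_to}, recv <- {recv_from}")
```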
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694232:695866 [7] NCCL INFO ncclCommInitRank comm 0x55742a9b83c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694230:695863 [5] NCCL INFO ncclCommInitRank comm 0x55dc57004ae0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694228:695864 [3] NCCL INFO ncclCommInitRank comm 0x556b40162c80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694226:695867 [1] NCCL INFO ncclCommInitRank comm 0x5564a0f0a5f0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694231:695862 [6] NCCL INFO ncclCommInitRank comm 0x557ecf725c00 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694229:695868 [4] NCCL INFO ncclCommInitRank comm 0x559567b3aec0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:694225:695861 [0] NCCL INFO ncclCommInitRank comm 0x55a9e9edde10 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc413a18f09886877 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694227:695865 [2] NCCL INFO ncclCommInitRank comm 0x55865e24d360 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc413a18f09886877 - Init COMPLETE
+[2025-10-13 10:49:49,269] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores',
'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 
'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-13 10:49:52,873] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
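The "Some weights ... are newly initialized" warnings above refer to the per-layer `scores` tensors. These are the learnable mask logits that each SupermaskLinearSparsity_SoftForward_Normal layer attaches to its frozen linear weight; they do not exist in the upstream Qwen2.5 checkpoint, so Transformers reports them as newly initialized and prints the generic "You should probably TRAIN this model" hint. A minimal sketch of such a layer, assuming a standard soft-forward supermask formulation (the class and argument names here are illustrative, not the repository's actual implementation):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftSupermaskLinear(nn.Linear):
        """Frozen base weight plus one learnable mask logit ("scores") per weight entry."""

        def __init__(self, in_features, out_features, bias=True,
                     temperature=1.3, init_score=3.0):
            super().__init__(in_features, out_features, bias=bias)
            self.weight.requires_grad_(False)  # base weight stays frozen; only scores train
            self.temperature = temperature
            # Initialized to 3.0, matching the "Pre-training init ... Mean=3.000000" lines below
            self.scores = nn.Parameter(torch.full_like(self.weight, init_score))

        def forward(self, x):
            # A temperature-scaled sigmoid turns the logits into a soft mask in (0, 1)
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)

Each scores tensor has exactly in_features x out_features entries, which is why the trainable-parameter listing further down reports 896 x 896 = 802816 for q_proj/o_proj, 896 x 128 = 114688 for k_proj/v_proj, 896 x 4864 = 4358144 for the MLP projections, and 1032192 and 802816 for the two connector layers; over 24 decoder layers plus the connector these sum to the 359661568 trainable parameters reported at 10:57:15.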
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 10:57:15,278 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 10:57:15,282 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:694227:701028 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10]
4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:694227:701028 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694232:701023 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:694230:701022 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:694230:701022 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694232:701023 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO P2P Chunksize set to 524288 
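In the NCCL lines above and below, each Trees entry reads children->rank->parent, one entry per channel, with -1 marking an absent child or parent: "[0] 4/-1/-1->3->2" says that on channel 0 rank 3 has child 4 and parent 2, so the eight local GPUs form the simple chain 0 -> 1 -> ... -> 7 on every one of the 24 channels. A small decoding sketch (the helper is ours, purely illustrative):

    def parse_tree_entry(entry: str) -> dict:
        """Decode an NCCL Trees entry such as '4/-1/-1->3->2'.

        The layout is children->rank->parent; -1 marks an absent slot.
        """
        children_part, rank, parent = entry.split("->")
        children = [int(c) for c in children_part.split("/") if int(c) != -1]
        return {
            "children": children,
            "rank": int(rank),
            "parent": None if int(parent) == -1 else int(parent),
        }

    # Rank 3 on channel 0: child 4, parent 2 -> one link in the chain 0 -> 1 -> ... -> 7
    print(parse_tree_entry("4/-1/-1->3->2"))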
+ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO 
Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO Channel 10/0 : 
+[repeated per-channel NCCL INFO lines: each of the 8 local ranks connected channels 00-23 to its ring neighbors in both directions (0->1->...->7->0 and the reverse) via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[ranks 1-7 logged the same "Connected all rings"/"Connected all trees", threadThresholds, and channel-count lines]
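The NCCL lines above and the "Init COMPLETE" lines below record a standard single-node, 8-GPU process-group bring-up. A minimal sketch of the kind of script that produces this output when launched with NCCL_DEBUG=INFO (a hypothetical standalone example, not the TinyLLaVA evaluation code itself):

    # nccl_init_sketch.py (hypothetical file name) -- run as:
    #   NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_init_sketch.py
    import os
    import torch
    import torch.distributed as dist

    def main():
        local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun
        torch.cuda.set_device(local_rank)
        # Creating the NCCL communicator is what emits the
        # "ncclCommInitRank ... Init COMPLETE" lines seen below.
        dist.init_process_group(backend="nccl")
        # A first collective triggers the ring/tree channel setup
        # ("Connected all rings" / "Connected all trees" above).
        t = torch.ones(1, device="cuda")
        dist.all_reduce(t)
        if dist.get_rank() == 0:
            print(f"world_size={dist.get_world_size()} sum={t.item()}")
        dist.destroy_process_group()

    if __name__ == "__main__":
        main()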
+ywang29-vrdb-test1-worker-0:694225:701021 [0] NCCL INFO ncclCommInitRank comm 0x7f16ec06b0c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694227:701028 [2] NCCL INFO ncclCommInitRank comm 0x7fae7806a5a0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694228:701024 [3] NCCL INFO ncclCommInitRank comm 0x7fbf7806afd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694229:701026 [4] NCCL INFO ncclCommInitRank comm 0x7f688006ac00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694226:701025 [1] NCCL INFO ncclCommInitRank comm 0x7f74f406ad20 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694232:701023 [7] NCCL INFO ncclCommInitRank comm 0x7f67b806b290 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694230:701022 [5] NCCL INFO ncclCommInitRank comm 0x7fcb3c06b200 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:694231:701027 [6] NCCL INFO ncclCommInitRank comm 0x7f88c806b0b0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xe46bdbecf1c42f19 - Init COMPLETE
+ 0%| | 1/520 [00:14<2:02:04, 14.11s/it] {'loss': 2.7101, 'grad_norm': 0.08765786557533228, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:08:27, 7.93s/it] {'loss': 2.5699, 'grad_norm': 0.0799213699481025, 'learning_rate': 0.025, 'epoch': 0.0}
+ 1%| | 3/520 [00:21<51:28, 5.97s/it] {'loss': 1.9579, 'grad_norm': 0.024496889052876235, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 4/520 [00:24<43:25, 5.05s/it] {'loss': 1.7398, 'grad_norm': 0.011054673488501811, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:01, 4.55s/it] {'loss': 1.8622, 'grad_norm': 0.02548665736430297, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:25, 4.25s/it] {'loss': 1.6206, 'grad_norm': 0.012789463392401869, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:36<35:07, 4.11s/it] {'loss': 1.5362, 'grad_norm': 0.010776507014997121, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:40<36:01, 4.22s/it] {'loss': 1.5578, 'grad_norm': 0.006670104558274915, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<36:12, 4.25s/it] {'loss': 1.6484, 'grad_norm': 0.008002951513676566, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<34:32, 4.06s/it] {'loss': 1.4467, 'grad_norm': 0.005594209883354097, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:52<33:44, 3.98s/it] {'loss': 1.5404, 'grad_norm': 0.006850987806942219, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:56<32:53, 3.89s/it] {'loss': 1.472, 'grad_norm': 0.004991452652407834, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-13 10:58:20,217] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:00<33:59, 4.02s/it] {'loss': 1.4657, 'grad_norm': 0.004282248155172662, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:04<32:56, 3.91s/it] {'loss': 1.5309, 'grad_norm': 0.005135270715431174, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:16, 3.83s/it] {'loss': 1.5307, 'grad_norm': 0.004034608488048391, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:11<32:01, 3.81s/it] {'loss': 1.4759, 'grad_norm': 0.0050642344798158, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:15<31:57, 3.81s/it] {'loss': 1.5767, 'grad_norm': 0.0038389097767747874, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:18<31:40, 3.79s/it] {'loss': 1.4285, 'grad_norm': 0.003891279003606019, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:22<31:34, 3.78s/it] {'loss': 1.5107, 'grad_norm': 0.003925740926903418, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:26<31:17, 3.75s/it] {'loss': 1.4455, 'grad_norm': 0.004177478211281597, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:30<31:04, 3.74s/it] {'loss': 1.5566, 'grad_norm': 0.00658417165707592, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:33<30:53, 3.72s/it] {'loss': 1.5937, 'grad_norm': 0.003413100267575722, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:37<30:40, 3.70s/it] {'loss': 1.5479, 'grad_norm': 0.004399600649865117, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:41<30:30, 3.69s/it] {'loss': 1.4876, 'grad_norm': 0.0037261955399284534, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:44<30:23, 3.68s/it] {'loss': 1.5372, 'grad_norm': 0.0037778681737388577, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:48<30:15, 3.67s/it] {'loss': 1.5224, 'grad_norm': 0.003227957910316965, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:52<30:09, 3.67s/it] {'loss': 1.4364, 'grad_norm': 0.0035812930204708834, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:55<30:12, 3.68s/it] {'loss': 1.4278, 'grad_norm': 0.003446185622419098, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
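The stage3.py warning at step 12 above recommends flushing the allocator cache on all ranks at the same time. A minimal sketch of that mitigation, assuming a standard DeepSpeed engine loop (`engine`, `dataloader`, and the flush cadence are placeholders, not names from this codebase):

    # Sketch: synchronized allocator-cache flushes, per the warning above.
    from deepspeed.accelerator import get_accelerator

    def train_epoch(engine, dataloader, flush_every=50):
        for step, batch in enumerate(dataloader):
            loss = engine(**batch).loss   # assumes an HF-style model output
            engine.backward(loss)
            engine.step()
            if step % flush_every == 0:
                # Same call at the same point on every rank, so all
                # ranks flush their caches together.
                get_accelerator().empty_cache()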
+ 6%|▌ | 29/520 [01:59<30:06, 3.68s/it] {'loss': 1.4377, 'grad_norm': 0.002970963338092195, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:03<29:59, 3.67s/it] {'loss': 1.5852, 'grad_norm': 0.0034496775433153796, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:06<29:59, 3.68s/it] {'loss': 1.4225, 'grad_norm': 0.0027388909665717292, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:10<29:56, 3.68s/it] {'loss': 1.5707, 'grad_norm': 0.007318574190499618, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:14<29:48, 3.67s/it] {'loss': 1.4338, 'grad_norm': 0.003218927314340755, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 7%|▋ | 34/520 [02:17<29:47, 3.68s/it] {'loss': 1.4247, 'grad_norm': 0.0034521905864180577, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:21<29:39, 3.67s/it] {'loss': 1.4452, 'grad_norm': 0.003738868715595758, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:25<29:33, 3.66s/it] {'loss': 1.5492, 'grad_norm': 0.002636302979240525, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:28<29:30, 3.67s/it] {'loss': 1.6851, 'grad_norm': 0.00686550368547814, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:32<29:22, 3.66s/it] {'loss': 1.6229, 'grad_norm': 0.0028681396565226015, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:36<29:16, 3.65s/it] {'loss': 1.4505, 'grad_norm': 0.0035652420165521487, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 40/520 [02:39<29:11, 3.65s/it] {'loss': 1.4992, 'grad_norm': 0.002782160039095499, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:43<29:09, 3.65s/it] {'loss': 1.4597, 'grad_norm': 0.0029018365807132523, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 42/520 [02:47<29:11, 3.67s/it] {'loss': 1.49, 'grad_norm': 0.0037212320883757, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 43/520 [02:51<29:38, 3.73s/it] {'loss': 1.4728, 'grad_norm': 0.0031763566852081628, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 44/520 [02:54<29:50, 3.76s/it] {'loss': 1.6103, 'grad_norm': 0.0032242247175516686, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 9%|▊ | 45/520 [02:58<29:54, 3.78s/it] {'loss': 1.4937, 'grad_norm': 0.003019138623664932, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:02<29:58, 3.79s/it] {'loss': 1.7015, 'grad_norm': 0.0032786421319191458, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:06<30:01, 3.81s/it] {'loss': 1.4823, 'grad_norm': 0.002928239094515038, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
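The logged learning rates are consistent with a 16-step linear warmup to the 0.2 peak (reached at step 16) followed by a half-cosine decay over the remaining 504 steps: for example, step 30 gives 0.2 * 0.5 * (1 + cos(pi * 14/504)) = 0.19961946980917455, exactly the logged value. A small reconstruction of that schedule (an inference from the logged numbers, not code from this repository):

    import math

    def lr_at(step, total_steps=520, warmup_steps=16, peak_lr=0.2):
        """Warmup + cosine schedule implied by the logged values."""
        if step <= warmup_steps:
            return peak_lr * step / warmup_steps          # linear warmup
        progress = (step - warmup_steps) / (total_steps - warmup_steps)
        return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

    assert abs(lr_at(1) - 0.0125) < 1e-15                 # step 1 in the log
    assert abs(lr_at(30) - 0.19961946980917455) < 1e-12   # step 30 in the log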
9%|▉ | 48/520 [03:10<29:58, 3.81s/it] {'loss': 1.448, 'grad_norm': 0.0031764383680399407, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:10<29:58, 3.81s/it] 9%|▉ | 49/520 [03:14<30:01, 3.83s/it] {'loss': 1.4905, 'grad_norm': 0.002760004409472173, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:14<30:01, 3.83s/it] 10%|▉ | 50/520 [03:17<29:57, 3.82s/it] {'loss': 1.4861, 'grad_norm': 0.0026745802298257385, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:17<29:57, 3.82s/it] 10%|▉ | 51/520 [03:21<29:53, 3.82s/it] {'loss': 1.4073, 'grad_norm': 0.0029760443719169657, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:21<29:53, 3.82s/it] 10%|█ | 52/520 [03:25<29:52, 3.83s/it] {'loss': 1.5479, 'grad_norm': 0.003062674687995376, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:25<29:52, 3.83s/it] 10%|█ | 53/520 [03:29<29:50, 3.83s/it] {'loss': 1.5413, 'grad_norm': 0.0030127051313332057, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:29<29:50, 3.83s/it] 10%|█ | 54/520 [03:33<29:44, 3.83s/it] {'loss': 1.4321, 'grad_norm': 0.003126155397216422, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<29:44, 3.83s/it] 11%|█ | 55/520 [03:37<29:45, 3.84s/it] {'loss': 1.418, 'grad_norm': 0.0029631908838595898, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<29:45, 3.84s/it] 11%|█ | 56/520 [03:40<29:42, 3.84s/it] {'loss': 1.5522, 'grad_norm': 0.0028975453855766536, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:40<29:42, 3.84s/it] 11%|█ | 57/520 [03:44<29:34, 3.83s/it] {'loss': 1.413, 'grad_norm': 0.004099468258083532, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:44<29:34, 3.83s/it] 11%|█ | 58/520 [03:48<29:29, 3.83s/it] {'loss': 1.5638, 'grad_norm': 0.0024188378860624427, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<29:29, 3.83s/it] 11%|█▏ | 59/520 [03:52<29:27, 3.84s/it] {'loss': 1.4377, 'grad_norm': 0.003413552458976199, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<29:27, 3.84s/it] 12%|█▏ | 60/520 [03:56<29:14, 3.81s/it] {'loss': 1.4973, 'grad_norm': 0.004690768903256192, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:56<29:14, 3.81s/it] 12%|█▏ | 61/520 [03:59<28:46, 3.76s/it] {'loss': 1.5789, 'grad_norm': 0.008218957265194194, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:59<28:46, 3.76s/it] 12%|█▏ | 62/520 [04:03<28:32, 3.74s/it] {'loss': 1.4656, 'grad_norm': 0.0031658142434148953, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:32, 3.74s/it] 12%|█▏ | 63/520 [04:07<28:22, 3.73s/it] {'loss': 1.4491, 'grad_norm': 0.0039687471284857905, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:07<28:22, 3.73s/it] 12%|█▏ | 64/520 [04:10<28:13, 3.71s/it] {'loss': 1.4892, 'grad_norm': 0.0028493129977533688, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:10<28:13, 3.71s/it] 12%|█▎ | 65/520 [04:14<28:06, 3.71s/it] {'loss': 1.4835, 'grad_norm': 0.002916552573779614, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:14<28:06, 3.71s/it] 13%|█▎ | 66/520 [04:18<28:03, 3.71s/it] {'loss': 1.4483, 'grad_norm': 0.004139090741010892, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:18<28:03, 3.71s/it] 13%|█▎ | 67/520 [04:21<27:54, 
3.70s/it] {'loss': 1.3307, 'grad_norm': 0.0023684921647428117, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:21<27:54, 3.70s/it] 13%|█▎ | 68/520 [04:25<27:53, 3.70s/it] {'loss': 1.3886, 'grad_norm': 0.0025543029506767544, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:25<27:53, 3.70s/it] 13%|█▎ | 69/520 [04:29<27:48, 3.70s/it] {'loss': 1.3698, 'grad_norm': 0.004086080489019923, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:29<27:48, 3.70s/it] 13%|█▎ | 70/520 [04:32<27:41, 3.69s/it] {'loss': 1.4154, 'grad_norm': 0.002806320826203875, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:32<27:41, 3.69s/it] 14%|█▎ | 71/520 [04:36<27:37, 3.69s/it] {'loss': 1.3495, 'grad_norm': 0.002949947633301889, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:36<27:37, 3.69s/it] 14%|█▍ | 72/520 [04:40<27:44, 3.71s/it] {'loss': 1.4962, 'grad_norm': 0.003247068394832163, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:40<27:44, 3.71s/it] 14%|█▍ | 73/520 [04:44<27:44, 3.72s/it] {'loss': 1.3168, 'grad_norm': 0.002528064478905947, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:44<27:44, 3.72s/it] 14%|█▍ | 74/520 [04:47<27:30, 3.70s/it] {'loss': 1.4419, 'grad_norm': 0.0026476521777224907, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:47<27:30, 3.70s/it] 14%|█▍ | 75/520 [04:51<27:35, 3.72s/it] {'loss': 1.3372, 'grad_norm': 0.0028923419821442484, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:51<27:35, 3.72s/it] 15%|█▍ | 76/520 [04:55<27:54, 3.77s/it] {'loss': 1.7222, 'grad_norm': 0.009540648422747188, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:55<27:54, 3.77s/it] 15%|█▍ | 77/520 [04:59<28:06, 3.81s/it] {'loss': 1.2764, 'grad_norm': 0.002953668749731131, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:59<28:06, 3.81s/it] 15%|█▌ | 78/520 [05:03<28:10, 3.82s/it] {'loss': 1.3986, 'grad_norm': 0.002690507536117604, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:03<28:10, 3.82s/it] 15%|█▌ | 79/520 [05:07<28:14, 3.84s/it] {'loss': 1.3845, 'grad_norm': 0.002358376322109719, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:07<28:14, 3.84s/it] 15%|█▌ | 80/520 [05:11<28:16, 3.86s/it] {'loss': 1.7276, 'grad_norm': 0.006359838993653484, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:11<28:16, 3.86s/it] 16%|█▌ | 81/520 [05:14<28:19, 3.87s/it] {'loss': 1.5333, 'grad_norm': 0.0031255995059493143, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:14<28:19, 3.87s/it] 16%|█▌ | 82/520 [05:18<28:14, 3.87s/it] {'loss': 1.4634, 'grad_norm': 0.0025066235124154065, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:18<28:14, 3.87s/it] 16%|█▌ | 83/520 [05:22<28:11, 3.87s/it] {'loss': 1.4812, 'grad_norm': 0.002784628094318887, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:22<28:11, 3.87s/it] 16%|█▌ | 84/520 [05:26<28:06, 3.87s/it] {'loss': 1.4897, 'grad_norm': 0.0030341580516255484, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:26<28:06, 3.87s/it] 16%|█▋ | 85/520 [05:30<28:02, 3.87s/it] {'loss': 1.5116, 'grad_norm': 0.0028336025736062035, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:30<28:02, 3.87s/it] 17%|█▋ | 86/520 
[05:34<27:34, 3.81s/it] {'loss': 1.5262, 'grad_norm': 0.0032729205588686895, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:34<27:34, 3.81s/it] 17%|█▋ | 87/520 [05:37<27:17, 3.78s/it] {'loss': 1.6537, 'grad_norm': 0.004021758773546894, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:37<27:17, 3.78s/it] 17%|█▋ | 88/520 [05:41<27:15, 3.79s/it] {'loss': 1.6237, 'grad_norm': 0.0042915374098603624, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:41<27:15, 3.79s/it] 17%|█▋ | 89/520 [05:45<27:16, 3.80s/it] {'loss': 1.4642, 'grad_norm': 0.003236878308816035, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:45<27:16, 3.80s/it] 17%|█▋ | 90/520 [05:49<27:12, 3.80s/it] {'loss': 1.3993, 'grad_norm': 0.00281550349234177, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:49<27:12, 3.80s/it] 18%|█▊ | 91/520 [05:52<27:10, 3.80s/it] {'loss': 1.4751, 'grad_norm': 0.0024354028478796815, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:53<27:10, 3.80s/it] 18%|█▊ | 92/520 [05:56<27:11, 3.81s/it] {'loss': 1.4153, 'grad_norm': 0.00269663818573682, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:56<27:11, 3.81s/it] 18%|█▊ | 93/520 [06:00<27:07, 3.81s/it] {'loss': 1.4291, 'grad_norm': 0.002825504982310256, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:00<27:07, 3.81s/it] 18%|█▊ | 94/520 [06:04<27:04, 3.81s/it] {'loss': 1.5261, 'grad_norm': 0.0029608096622056683, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:04<27:04, 3.81s/it] 18%|█▊ | 95/520 [06:08<26:56, 3.80s/it] {'loss': 1.3958, 'grad_norm': 0.00318843067985161, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:08<26:56, 3.80s/it] 18%|█▊ | 96/520 [06:12<26:58, 3.82s/it] {'loss': 1.4153, 'grad_norm': 0.002264706873990567, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:12<26:58, 3.82s/it] 19%|█▊ | 97/520 [06:15<26:55, 3.82s/it] {'loss': 1.379, 'grad_norm': 0.0028313530612527737, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:15<26:55, 3.82s/it] 19%|█▉ | 98/520 [06:19<26:55, 3.83s/it] {'loss': 1.3795, 'grad_norm': 0.0022385268586110492, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:19<26:55, 3.83s/it] 19%|█▉ | 99/520 [06:23<26:50, 3.82s/it] {'loss': 1.408, 'grad_norm': 0.002769021746156639, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:23<26:50, 3.82s/it] 19%|█▉ | 100/520 [06:27<26:43, 3.82s/it] {'loss': 1.5688, 'grad_norm': 0.003436366118132005, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:27<26:43, 3.82s/it] 19%|█▉ | 101/520 [06:31<26:38, 3.82s/it] {'loss': 1.3915, 'grad_norm': 0.002608465114312827, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:31<26:38, 3.82s/it] 20%|█▉ | 102/520 [06:35<26:34, 3.82s/it] {'loss': 1.4036, 'grad_norm': 0.0026842614524117403, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:35<26:34, 3.82s/it] 20%|█▉ | 103/520 [06:38<26:40, 3.84s/it] {'loss': 1.3257, 'grad_norm': 0.0022446740427856846, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:38<26:40, 3.84s/it] 20%|██ | 104/520 [06:42<26:33, 3.83s/it] {'loss': 1.4055, 'grad_norm': 0.002493836473455324, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:42<26:33, 3.83s/it] 20%|██ | 
105/520 [06:46<26:27, 3.82s/it] {'loss': 1.3976, 'grad_norm': 0.002211867879523168, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:46<26:27, 3.82s/it] 20%|██ | 106/520 [06:50<26:18, 3.81s/it] {'loss': 1.513, 'grad_norm': 0.0026963948536381723, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:50<26:18, 3.81s/it] 21%|██ | 107/520 [06:54<26:18, 3.82s/it] {'loss': 1.5197, 'grad_norm': 0.00282803780867953, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:54<26:18, 3.82s/it] 21%|██ | 108/520 [06:57<26:16, 3.83s/it] {'loss': 1.3569, 'grad_norm': 0.002643959124661249, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:57<26:16, 3.83s/it] 21%|██ | 109/520 [07:01<26:13, 3.83s/it] {'loss': 1.4633, 'grad_norm': 0.0023782596771220123, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:01<26:13, 3.83s/it] 21%|██ | 110/520 [07:05<26:07, 3.82s/it] {'loss': 1.5508, 'grad_norm': 0.002465656130724587, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:05<26:07, 3.82s/it] 21%|██▏ | 111/520 [07:09<26:01, 3.82s/it] {'loss': 1.5663, 'grad_norm': 0.0025954874906947752, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:09<26:01, 3.82s/it] 22%|██▏ | 112/520 [07:13<25:56, 3.81s/it] {'loss': 1.4385, 'grad_norm': 0.002502724958520052, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:13<25:56, 3.81s/it] 22%|██▏ | 113/520 [07:17<25:53, 3.82s/it] {'loss': 1.3047, 'grad_norm': 0.0021249557957361085, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:17<25:53, 3.82s/it] 22%|██▏ | 114/520 [07:20<25:47, 3.81s/it] {'loss': 1.4071, 'grad_norm': 0.002258033276450271, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:20<25:47, 3.81s/it] 22%|██▏ | 115/520 [07:24<25:41, 3.81s/it] {'loss': 1.5268, 'grad_norm': 0.0021670244176599344, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:24<25:41, 3.81s/it] 22%|██▏ | 116/520 [07:28<25:36, 3.80s/it] {'loss': 1.5252, 'grad_norm': 0.002151343859546296, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:28<25:36, 3.80s/it] 22%|██▎ | 117/520 [07:32<25:30, 3.80s/it] {'loss': 1.5004, 'grad_norm': 0.0025679502872093794, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:32<25:30, 3.80s/it] 23%|██▎ | 118/520 [07:36<25:27, 3.80s/it] {'loss': 1.3783, 'grad_norm': 0.0020586965756228194, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:36<25:27, 3.80s/it] 23%|██▎ | 119/520 [07:39<25:20, 3.79s/it] {'loss': 1.3362, 'grad_norm': 0.002127016468077169, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:39<25:20, 3.79s/it] 23%|██▎ | 120/520 [07:43<25:13, 3.78s/it] {'loss': 1.364, 'grad_norm': 0.002851462049027233, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:43<25:13, 3.78s/it] 23%|██▎ | 121/520 [07:47<25:07, 3.78s/it] {'loss': 1.418, 'grad_norm': 0.002425561403349422, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:47<25:07, 3.78s/it] 23%|██▎ | 122/520 [07:51<25:04, 3.78s/it] {'loss': 1.3046, 'grad_norm': 0.0021994217844631134, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:51<25:04, 3.78s/it] 24%|██▎ | 123/520 [07:54<25:01, 3.78s/it] {'loss': 1.5591, 'grad_norm': 0.003051351505574186, 'learning_rate': 0.17857026792838737, 
'epoch': 0.24}
+ 24%|██▍ | 124/520 [07:58<24:56, 3.78s/it] {'loss': 1.3939, 'grad_norm': 0.0025144947857069083, 'learning_rate': 0.178183148246803, 'epoch': 0.24}
+ 24%|██▍ | 125/520 [08:02<25:09, 3.82s/it] {'loss': 1.384, 'grad_norm': 0.0022957152621251274, 'learning_rate': 0.1777929908264715, 'epoch': 0.24}
+ 24%|██▍ | 126/520 [08:06<26:12, 3.99s/it] {'loss': 1.453, 'grad_norm': 0.0020415436767026564, 'learning_rate': 0.17739981082662276, 'epoch': 0.24}
+ 24%|██▍ | 127/520 [08:10<25:27, 3.89s/it] {'loss': 1.3568, 'grad_norm': 0.0027136325903940484, 'learning_rate': 0.1770036235239263, 'epoch': 0.24}
+ 25%|██▍ | 128/520 [08:14<24:57, 3.82s/it] {'loss': 1.4097, 'grad_norm': 0.0022660441093792106, 'learning_rate': 0.1766044443118978, 'epoch': 0.25}
+ 25%|██▍ | 129/520 [08:17<24:30, 3.76s/it] {'loss': 1.3321, 'grad_norm': 0.0018824119352205293, 'learning_rate': 0.17620228870030108, 'epoch': 0.25}
+ 25%|██▌ | 130/520 [08:21<24:11, 3.72s/it] {'loss': 1.3962, 'grad_norm': 0.0021013031884546916, 'learning_rate': 0.1757971723145453, 'epoch': 0.25}
+ 25%|██▌ | 131/520 [08:25<23:57, 3.70s/it] {'loss': 1.4061, 'grad_norm': 0.0023918589675293992, 'learning_rate': 0.175389110895078, 'epoch': 0.25}
+ 25%|██▌ | 132/520 [08:28<23:48, 3.68s/it] {'loss': 1.4306, 'grad_norm': 0.002249384313721006, 'learning_rate': 0.17497812029677343, 'epoch': 0.25}
+ 26%|██▌ | 133/520 [08:32<23:41, 3.67s/it] {'loss': 1.3358, 'grad_norm': 0.0023354728954889164, 'learning_rate': 0.17456421648831655, 'epoch': 0.26}
+ 26%|██▌ | 134/520 [08:36<23:33, 3.66s/it] {'loss': 1.4216, 'grad_norm': 0.0025055814467991938, 'learning_rate': 0.17414741555158267, 'epoch': 0.26}
+ 26%|██▌ | 135/520 [08:39<23:23, 3.65s/it] {'loss': 1.4999, 'grad_norm': 0.002316393621981388, 'learning_rate': 0.1737277336810124, 'epoch': 0.26}
+ 26%|██▌ | 136/520 [08:43<23:18, 3.64s/it] {'loss': 1.4103, 'grad_norm': 0.002455436032200521, 'learning_rate': 0.17330518718298263, 'epoch': 0.26}
+ 26%|██▋ | 137/520 [08:46<23:11, 3.63s/it] {'loss': 1.3368, 'grad_norm': 0.002444531623265137, 'learning_rate': 0.17287979247517285, 'epoch': 0.26}
+ 27%|██▋ | 138/520 [08:50<23:06, 3.63s/it] {'loss': 1.3493, 'grad_norm': 0.0022190696549201335, 'learning_rate': 0.17245156608592727, 'epoch': 0.27}
+ 27%|██▋ | 139/520 [08:54<23:05, 3.64s/it] {'loss': 1.3101, 'grad_norm': 0.0024298827143001884, 'learning_rate': 0.17202052465361267, 'epoch': 0.27}
+ 27%|██▋ | 140/520 [08:57<23:10, 3.66s/it] {'loss': 1.4665, 'grad_norm': 0.0029443405264786907, 'learning_rate': 0.17158668492597184, 'epoch': 0.27}
+ 27%|██▋ | 141/520 [09:01<23:05, 3.66s/it] {'loss': 1.4617, 'grad_norm': 0.002298447019318404, 'learning_rate': 0.17115006375947303, 'epoch': 0.27}
+ 27%|██▋ | 142/520 [09:05<23:00, 3.65s/it] {'loss': 1.5266, 'grad_norm': 0.0022265333318947705, 'learning_rate': 0.17071067811865476, 'epoch': 0.27}
+ 28%|██▊ | 143/520 [09:08<22:57, 3.65s/it] {'loss': 1.3791, 'grad_norm': 0.002279356769818595, 'learning_rate': 0.17026854507546693, 'epoch': 0.28}
+ 28%|██▊ | 144/520 [09:12<22:54, 3.66s/it] {'loss': 1.3316, 'grad_norm': 0.00218291652524226, 'learning_rate': 0.1698236818086073, 'epoch': 0.28}
+ 28%|██▊ | 145/520 [09:16<22:52, 3.66s/it] {'loss': 1.2703, 'grad_norm': 0.001976699470058375, 'learning_rate': 0.16937610560285418, 'epoch': 0.28}
+ 28%|██▊ | 146/520 [09:19<22:49, 3.66s/it] {'loss': 1.5242, 'grad_norm': 0.0021957549893094033, 'learning_rate': 0.1689258338483947, 'epoch': 0.28}
+ 28%|██▊ | 147/520 [09:23<22:46, 3.66s/it] {'loss': 1.3172, 'grad_norm': 0.0021971179690210576, 'learning_rate': 0.16847288404014937, 'epoch': 0.28}
+ 28%|██▊ | 148/520 [09:27<22:38, 3.65s/it] {'loss': 1.3384, 'grad_norm': 0.002043695928003757, 'learning_rate': 0.16801727377709194, 'epoch': 0.28}
+ 29%|██▊ | 149/520 [09:30<22:36, 3.66s/it] {'loss': 1.2922, 'grad_norm': 0.0021445170326289263, 'learning_rate': 0.16755902076156604, 'epoch': 0.29}
+ 29%|██▉ | 150/520 [09:34<22:32, 3.65s/it] {'loss': 1.5337, 'grad_norm': 0.002353181395344592, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 29%|██▉ | 151/520 [09:38<22:29, 3.66s/it] {'loss': 1.3386, 'grad_norm': 0.002192117712999578, 'learning_rate': 0.1666346577952004, 'epoch': 0.29}
+ 29%|██▉ | 152/520 [09:41<22:26, 3.66s/it] {'loss': 1.3107, 'grad_norm': 0.0023531433418324506, 'learning_rate': 0.16616858375968596, 'epoch': 0.29}
+ 29%|██▉ | 153/520 [09:45<22:22, 3.66s/it] {'loss': 1.3406, 'grad_norm': 0.0022522771143399286, 'learning_rate': 0.16569993880095807, 'epoch': 0.29}
+ 30%|██▉ | 154/520 [09:49<22:19, 3.66s/it] {'loss': 1.4326, 'grad_norm': 0.0020312889186939135, 'learning_rate': 0.16522874112781213, 'epoch': 0.3}
+ 30%|██▉ | 155/520 [09:52<22:15, 3.66s/it] {'loss': 1.3361, 'grad_norm': 0.0022344723485385803, 'learning_rate': 0.16475500904822704, 'epoch': 0.3}
+ 30%|███ | 156/520 [09:56<22:13, 3.66s/it] {'loss': 1.3605, 'grad_norm': 0.002229048664685277, 'learning_rate': 0.16427876096865393, 'epoch': 0.3}
+ 30%|███ | 157/520 [10:00<22:11, 3.67s/it] {'loss': 1.5476, 'grad_norm': 0.002938890816931853, 'learning_rate': 0.16380001539330089, 'epoch': 0.3}
+ 30%|███ | 158/520 [10:03<22:07, 3.67s/it] {'loss': 1.3441, 'grad_norm': 0.0025186161799528248, 'learning_rate': 0.163318790923414, 'epoch': 0.3}
+ 31%|███ | 159/520 [10:07<22:03, 3.66s/it] {'loss': 1.3642, 'grad_norm': 0.002053182001181053, 'learning_rate': 0.16283510625655473, 'epoch': 0.31}
+ 31%|███ | 160/520 [10:11<21:58, 3.66s/it] {'loss': 1.3906, 'grad_norm': 0.002231754439759347, 'learning_rate': 0.16234898018587338, 'epoch': 0.31}
+ 31%|███ | 161/520 [10:14<22:03, 3.69s/it] {'loss': 1.3802, 'grad_norm': 0.002118232199388918, 'learning_rate': 0.16186043159937882, 'epoch': 0.31}
+ 31%|███ | 162/520 [10:18<21:57, 3.68s/it] {'loss': 1.4584, 'grad_norm': 0.0025741653042351573, 'learning_rate': 0.16136947947920477, 'epoch': 0.31}
+ 31%|███▏ | 163/520 [10:22<21:50, 3.67s/it] {'loss': 1.2622, 'grad_norm': 0.0028073594423031253, 'learning_rate': 0.16087614290087207, 'epoch': 0.31}
+ 32%|███▏ | 164/520 [10:25<21:46, 3.67s/it] {'loss': 1.2276, 'grad_norm': 0.002136326681208409, 'learning_rate': 0.16038044103254775, 'epoch': 0.32}
+ 32%|███▏ | 165/520 [10:29<21:42, 3.67s/it] {'loss': 1.3673, 'grad_norm': 0.0020479769014439065, 'learning_rate': 0.15988239313430005, 'epoch': 0.32}
+ 32%|███▏ | 166/520 [10:33<21:36, 3.66s/it] {'loss': 1.3645, 'grad_norm': 0.002347516857373837, 'learning_rate': 0.15938201855735015, 'epoch': 0.32}
+ 32%|███▏ | 167/520 [10:36<21:32, 3.66s/it] {'loss': 1.3622, 'grad_norm': 0.0026372174527958337, 'learning_rate': 0.15887933674332047, 'epoch': 0.32}
+ 32%|███▏ | 168/520 [10:40<21:28, 3.66s/it] {'loss': 1.283, 'grad_norm': 0.0022145913587183325, 'learning_rate': 0.158374367223479, 'epoch': 0.32}
+ 32%|███▎ | 169/520 [10:44<21:25, 3.66s/it] {'loss': 1.3743, 'grad_norm': 0.002033584311662868, 'learning_rate': 0.1578671296179806, 'epoch': 0.33}
+ 33%|███▎ | 170/520 [10:47<21:22, 3.67s/it] {'loss': 1.3695, 'grad_norm': 0.002242902704475644, 'learning_rate': 0.15735764363510463, 'epoch': 0.33}
+ 33%|███▎ | 171/520 [10:51<21:17, 3.66s/it] {'loss': 1.2994, 'grad_norm': 0.0022658266328720073, 'learning_rate': 0.15684592907048925, 'epoch': 0.33}
+ 33%|███▎ | 172/520 [10:55<21:13, 3.66s/it] {'loss': 1.3695, 'grad_norm': 0.002064882889859168, 'learning_rate': 0.1563320058063622, 'epoch': 0.33}
+ 33%|███▎ | 173/520 [10:58<21:10, 3.66s/it] {'loss': 1.2986, 'grad_norm': 0.0020594951844696303, 'learning_rate': 0.15581589381076844, 'epoch': 0.33}
+ 33%|███▎ | 174/520 [11:02<21:07, 3.66s/it] {'loss': 1.3734, 'grad_norm': 0.002413678488339266, 'learning_rate': 0.15529761313679394, 'epoch': 0.33}
+ 34%|███▎ | 175/520 [11:06<21:05, 3.67s/it] {'loss': 1.2677, 'grad_norm': 0.0020222578835892096, 'learning_rate': 0.15477718392178716, 'epoch': 0.34}
+ 34%|███▍ | 176/520 [11:09<21:01, 3.67s/it] {'loss': 1.4659, 'grad_norm': 0.0021698143443103016, 'learning_rate': 0.15425462638657594, 'epoch': 0.34}
+ 34%|███▍ | 177/520 [11:13<20:57, 3.67s/it] {'loss': 1.3267, 'grad_norm': 0.0024053443244511025, 'learning_rate': 0.1537299608346824, 'epoch': 0.34}
+ 34%|███▍ | 178/520 [11:17<20:52, 3.66s/it] {'loss': 1.3408, 'grad_norm': 0.0023370241618721746, 'learning_rate': 0.15320320765153367, 'epoch': 0.34}
+ 34%|███▍ | 179/520 [11:20<20:45, 3.65s/it] {'loss': 1.4299, 'grad_norm': 0.0020725300872958654, 'learning_rate': 0.15267438730367008, 'epoch': 0.34}
+ 35%|███▍ | 180/520 [11:24<20:44, 3.66s/it] {'loss': 1.3464, 'grad_norm': 0.0022850442887218806, 'learning_rate': 0.1521435203379498, 'epoch': 0.35}
+ 35%|███▍ | 181/520 [11:28<20:41, 3.66s/it] {'loss': 1.3132, 'grad_norm': 0.0020017604341109286, 'learning_rate': 0.15161062738075068, 'epoch': 0.35}
+ 35%|███▌ | 182/520 [11:31<20:37, 3.66s/it] {'loss': 1.3237, 'grad_norm': 0.002043023240352057, 'learning_rate': 0.1510757291371686, 'epoch': 0.35}
+ 35%|███▌ | 183/520 [11:35<20:31, 3.65s/it] {'loss': 1.3568, 'grad_norm': 0.002182278063952684, 'learning_rate': 0.1505388463902131, 'epoch': 0.35}
+ 35%|███▌ | 184/520 [11:39<20:26, 3.65s/it] {'loss': 1.2607, 'grad_norm': 0.002236506023092556, 'learning_rate': 0.15000000000000002, 'epoch': 0.35}
+ 36%|███▌ | 185/520 [11:42<20:23, 3.65s/it] {'loss': 1.4435, 'grad_norm': 0.0020544306005756976, 'learning_rate': 0.14945921090294076, 'epoch': 0.36}
+ 36%|███▌ | 186/520 [11:46<20:19, 3.65s/it] {'loss': 1.2918, 'grad_norm': 0.0020395021478496174, 'learning_rate': 0.14891650011092894, 'epoch': 0.36}
+ 36%|███▌ | 187/520 [11:50<20:16, 3.65s/it] {'loss': 1.3, 'grad_norm': 0.0024664408096150775, 'learning_rate': 0.14837188871052398, 'epoch': 0.36}
+ 36%|███▌ | 188/520 [11:53<20:11, 3.65s/it] {'loss': 1.3826, 'grad_norm': 0.002248514897250561, 'learning_rate': 0.14782539786213184, 'epoch': 0.36}
+ 36%|███▋ | 189/520 [11:57<20:18, 3.68s/it] {'loss': 1.3899, 'grad_norm': 0.0018844675486248922, 'learning_rate': 0.1472770487991827, 'epoch': 0.36}
+ 37%|███▋ | 190/520 [12:01<20:09, 3.66s/it] {'loss': 1.3066, 'grad_norm': 0.0022824701795560007, 'learning_rate': 0.1467268628273062, 'epoch': 0.37}
+ 37%|███▋ | 191/520 [12:04<20:06, 3.67s/it] {'loss': 1.2635, 'grad_norm': 0.001943764917034255, 'learning_rate': 0.1461748613235034, 'epoch': 0.37}
+ 37%|███▋ | 192/520 [12:08<20:03, 3.67s/it] {'loss': 1.3532, 'grad_norm': 0.0020471842101220557, 'learning_rate': 0.1456210657353163, 'epoch': 0.37}
+ 37%|███▋ | 193/520 [12:12<19:59, 3.67s/it] {'loss': 1.4012, 'grad_norm': 0.0024911645472931315, 'learning_rate': 0.14506549757999454, 'epoch': 0.37}
+ 37%|███▋ | 194/520 [12:15<20:05, 3.70s/it] {'loss': 1.26, 'grad_norm': 0.002018851747934751, 'learning_rate': 0.14450817844365924, 'epoch': 0.37}
+ 38%|███▊ | 195/520 [12:19<19:59, 3.69s/it] {'loss': 1.3693, 'grad_norm': 0.0021001854515795616, 'learning_rate': 0.1439491299804645, 'epoch': 0.38}
+ 38%|███▊ | 196/520 [12:23<19:52, 3.68s/it] {'loss': 1.3321, 'grad_norm': 0.002254419801623629, 'learning_rate': 0.14338837391175582, 'epoch': 0.38}
+ 38%|███▊ | 197/520 [12:26<19:49, 3.68s/it] {'loss': 1.2918, 'grad_norm': 0.0022692198265729852, 'learning_rate': 0.14282593202522628, 'epoch': 0.38}
+ 38%|███▊ | 198/520 [12:30<19:44, 3.68s/it] {'loss': 1.3684, 'grad_norm': 0.00218868568626785, 'learning_rate': 0.14226182617406996, 'epoch': 0.38}
+ 38%|███▊ | 199/520 [12:34<19:42, 3.68s/it] {'loss': 1.2812, 'grad_norm': 0.0021891195117254366, 'learning_rate': 0.14169607827613281, 'epoch': 0.38}
+ 38%|███▊ | 200/520 [12:37<19:39, 3.69s/it] {'loss': 1.3241, 'grad_norm': 0.0023036878460354375, 'learning_rate': 0.14112871031306118, 'epoch': 0.38}
+ 39%|███▊ | 201/520 [12:41<19:35, 3.68s/it] {'loss': 1.334, 'grad_norm': 0.00188178673185312, 'learning_rate': 0.1405597443294475, 'epoch': 0.39}
+ 39%|███▉ | 202/520 [12:45<19:31, 3.68s/it] {'loss': 1.2832, 'grad_norm': 0.0020267510148341853, 'learning_rate': 0.13998920243197407, 'epoch': 0.39}
+ 39%|███▉ | 203/520 [12:48<19:26, 3.68s/it] {'loss': 1.3285, 'grad_norm': 0.002177837954979882, 'learning_rate': 0.13941710678855396, 'epoch': 0.39}
+ 39%|███▉ | 204/520 [12:52<19:20, 3.67s/it] {'loss': 1.3609, 'grad_norm': 0.002178442950287651, 'learning_rate': 0.13884347962746948, 'epoch': 0.39}
+ 39%|███▉ | 205/520 [12:56<19:18, 3.68s/it] {'loss': 1.3463, 'grad_norm': 0.002165238961013122, 'learning_rate': 0.138268343236509, 'epoch': 0.39}
+ 40%|███▉ | 206/520 [12:59<19:11, 3.67s/it] {'loss': 1.4035, 'grad_norm': 0.0021078691736061994, 'learning_rate': 0.13769171996210053, 'epoch': 0.4}
+ 40%|███▉ | 207/520 [13:03<19:08, 3.67s/it] {'loss': 1.3232, 'grad_norm': 0.0018574520210515142, 'learning_rate': 0.1371136322084438, 'epoch': 0.4}
+ 40%|████ | 208/520 [13:07<19:05, 3.67s/it] {'loss': 1.3732, 'grad_norm': 0.0022820449868450436, 'learning_rate': 0.13653410243663952, 'epoch': 0.4}
+ 40%|████ | 209/520 [13:10<19:02, 3.67s/it] {'loss': 1.2862, 'grad_norm': 0.0020317081878278875, 'learning_rate': 0.13595315316381676, 'epoch': 0.4}
+ 40%|████ | 210/520 [13:14<18:59, 3.68s/it] {'loss': 1.3752, 'grad_norm': 0.002414590655979455, 'learning_rate': 0.13537080696225814, 'epoch': 0.4}
+ 41%|████ | 211/520 [13:18<18:57, 3.68s/it] {'loss': 1.374, 'grad_norm': 0.0019288298432611087, 'learning_rate': 0.13478708645852272, 'epoch': 0.41}
+ 41%|████ | 212/520 [13:22<18:57, 3.69s/it] {'loss': 1.3463, 'grad_norm': 0.001960249728884778, 'learning_rate': 0.1342020143325669, 'epoch': 0.41}
+ 41%|████ | 213/520 [13:25<19:03, 3.72s/it] {'loss': 1.309, 'grad_norm': 0.0024117303021552404, 'learning_rate': 0.13361561331686309, 'epoch': 0.41}
+ 41%|████ | 214/520 [13:29<19:09, 3.76s/it] {'loss': 1.2985, 'grad_norm': 0.002134564591976939, 'learning_rate': 0.13302790619551672, 'epoch': 0.41}
+ 41%|████▏ | 215/520 [13:33<19:08, 3.77s/it] {'loss': 1.2663, 'grad_norm': 0.0020187660081799307, 'learning_rate': 0.1324389158033807, 'epoch': 0.41}
+ 42%|████▏ | 216/520 [13:37<19:10, 3.78s/it] {'loss': 1.2154, 'grad_norm': 0.001987697089326168, 'learning_rate': 0.13184866502516845, 'epoch': 0.42}
+ 42%|████▏ | 217/520 [13:41<19:06, 3.78s/it] {'loss': 1.3386, 'grad_norm': 0.002155677184586559, 'learning_rate': 0.13125717679456447, 'epoch': 0.42}
+ 42%|████▏ | 218/520 [13:44<19:07, 3.80s/it] {'loss': 1.337, 'grad_norm': 0.0022014582708344754, 'learning_rate': 0.13066447409333345, 'epoch': 0.42}
+ 42%|████▏ | 219/520 [13:48<19:03, 3.80s/it] {'loss': 1.3055, 'grad_norm': 0.0018311160558055197, 'learning_rate': 0.1300705799504273, 'epoch': 0.42}
+ 42%|████▏ | 220/520 [13:52<18:57, 3.79s/it] {'loss': 1.3156, 'grad_norm': 0.002170619231885686, 'learning_rate': 0.12947551744109043, 'epoch': 0.42}
+ 42%|████▎ | 221/520 [13:56<18:52, 3.79s/it] {'loss': 1.336, 'grad_norm': 0.002044173458929818, 'learning_rate': 0.128879309685963, 'epoch': 0.42}
+ 43%|████▎ | 222/520 [13:59<18:37, 3.75s/it] {'loss': 1.2507, 'grad_norm': 0.0020117574243058104, 'learning_rate': 0.12828197985018275, 'epoch': 0.43}
+ 43%|████▎ | 223/520 [14:03<18:27, 3.73s/it] {'loss': 1.2468, 'grad_norm': 0.0018872574688494582, 'learning_rate': 0.12768355114248495, 'epoch': 0.43}
+ 43%|████▎ | 224/520 [14:07<18:18, 3.71s/it] {'loss': 1.4595, 'grad_norm': 0.002563408458230363, 'learning_rate': 0.12708404681430052, 'epoch': 0.43}
+ 43%|████▎ | 225/520 [14:10<18:11, 3.70s/it] {'loss': 1.2619, 'grad_norm': 0.002072759505674558, 'learning_rate': 0.1264834901588527, 'epoch': 0.43}
+ 43%|████▎ | 226/520 [14:14<18:10, 3.71s/it] {'loss': 1.3685, 'grad_norm': 0.0019690674814372565, 'learning_rate': 0.12588190451025208, 'epoch': 0.43}
+ 44%|████▎ | 227/520 [14:18<18:02, 3.69s/it] {'loss': 1.353, 'grad_norm': 0.0019295255896096, 'learning_rate': 0.12527931324258976, 'epoch': 0.44}
+ 44%|████▍ | 228/520 [14:22<18:01, 3.70s/it] {'loss': 1.4561, 'grad_norm': 0.0021731509725110686, 'learning_rate': 0.12467573976902935, 'epoch': 0.44}
+ 44%|████▍ | 229/520 [14:25<18:15, 3.77s/it] {'loss': 1.3172, 'grad_norm': 0.0017946056678938564, 'learning_rate': 0.12407120754089732, 'epoch': 0.44}
+ 44%|████▍ | 230/520 [14:29<18:10, 3.76s/it] {'loss': 1.1979, 'grad_norm': 0.001919269711083073, 'learning_rate': 0.12346574004677154, 'epoch': 0.44}
+ 44%|████▍ | 231/520 [14:33<17:57, 3.73s/it] {'loss': 1.2677, 'grad_norm': 0.0018401928628357315, 'learning_rate': 0.12285936081156897, 'epoch': 0.44}
+ 45%|████▍ | 232/520 [14:37<17:47, 3.71s/it] {'loss': 1.4841, 'grad_norm': 0.002281987559982159, 'learning_rate': 0.12225209339563144, 'epoch': 0.45}
+ 45%|████▍ | 233/520 [14:40<17:38, 3.69s/it] {'loss': 1.3563, 'grad_norm': 0.0021809711516100104, 'learning_rate': 0.12164396139381028, 'epoch': 0.45}
+ 45%|████▌ | 234/520 [14:44<17:34, 3.69s/it] {'loss': 1.2049, 'grad_norm': 0.0020462719731431005, 'learning_rate': 0.12103498843454959, 'epoch': 0.45}
+ 45%|████▌ | 235/520 [14:48<17:34, 3.70s/it] {'loss': 1.2769, 'grad_norm': 0.002206456322038293, 'learning_rate': 0.12042519817896805, 'epoch': 0.45}
+ 45%|████▌ | 236/520 [14:51<17:28, 3.69s/it] {'loss': 1.3833, 'grad_norm': 0.0019011389715878732, 'learning_rate': 0.11981461431993977, 'epoch': 0.45}
+ 46%|████▌ | 237/520 [14:55<17:22, 3.68s/it] {'loss': 1.3437, 'grad_norm': 0.001933222332792352, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 46%|████▌ | 238/520 [14:59<17:17, 3.68s/it] {'loss': 1.2838, 'grad_norm': 0.0019934374470867195, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 46%|████▌ | 239/520 [15:02<17:11, 3.67s/it] {'loss': 1.3792, 'grad_norm': 0.002027973058674603, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 46%|████▌ | 240/520 [15:06<17:08, 3.67s/it] {'loss': 1.157, 'grad_norm': 0.002072481486553601, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 46%|████▋ | 241/520 [15:10<17:02, 3.67s/it] {'loss': 1.2477, 'grad_norm': 0.0019046920162401901, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 47%|████▋ | 242/520 [15:13<16:58, 3.67s/it] {'loss': 1.2626, 'grad_norm': 0.0018737209428156744, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 47%|████▋ | 243/520 [15:17<16:54, 3.66s/it] {'loss': 1.2481, 'grad_norm': 0.001955862583488398, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 47%|████▋ | 244/520 [15:21<16:51, 3.66s/it] {'loss': 1.3872, 'grad_norm': 0.0019817104223106828, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 47%|████▋ | 245/520 [15:24<16:47, 3.67s/it] {'loss': 1.2352, 'grad_norm': 0.0019889334725264754, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 47%|████▋ | 246/520 [15:28<16:46, 3.67s/it] {'loss': 1.446, 'grad_norm': 0.0020993124034571578, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 48%|████▊ | 247/520 [15:32<16:42, 3.67s/it] {'loss': 1.4219, 'grad_norm': 0.001992157518301301, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 48%|████▊ | 248/520 [15:35<16:37, 3.67s/it] {'loss': 1.2524, 'grad_norm': 0.0020838917932195144, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 48%|████▊ | 249/520 [15:39<16:32, 3.66s/it] {'loss': 1.3472, 'grad_norm': 0.0019976846954777327, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 48%|████▊ | 250/520 [15:43<16:28, 3.66s/it] {'loss': 1.2765, 'grad_norm': 0.00209704510342004, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 48%|████▊ | 251/520 [15:46<16:26, 3.67s/it] {'loss': 1.3464, 'grad_norm': 0.0017957704267117167, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 48%|████▊ | 252/520 [15:50<16:22, 3.66s/it] {'loss': 1.3428, 'grad_norm': 0.0020560364908631607, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 49%|████▊ | 253/520 [15:54<16:19, 3.67s/it] {'loss': 1.3454, 'grad_norm': 0.0023137899003855015, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 49%|████▉ | 254/520 [15:57<16:15, 3.67s/it] {'loss': 1.2608, 'grad_norm': 0.001831148140354237, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 49%|████▉ | 255/520 [16:01<16:10, 3.66s/it] {'loss': 1.2764, 'grad_norm': 0.002169445573710334, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 49%|████▉ | 256/520 [16:05<16:05, 3.66s/it] {'loss': 1.3221, 'grad_norm': 0.002064957187077119, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 49%|████▉ | 257/520 [16:08<16:00, 3.65s/it] {'loss': 1.3113, 'grad_norm': 0.002035652479190026, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 50%|████▉ | 258/520 [16:12<15:59, 3.66s/it] {'loss': 1.3208, 'grad_norm': 0.0017178732804551686, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 50%|████▉ | 259/520 [16:16<16:04, 3.70s/it] {'loss': 1.3967, 'grad_norm': 0.0023346211174769794, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 50%|█████ | 260/520 [16:20<16:16, 3.76s/it] {'loss': 1.4071, 'grad_norm': 0.0019204482838990425, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 50%|█████ | 261/520 [16:23<16:22, 3.79s/it] {'loss': 1.3383, 'grad_norm': 0.0019475114574950942, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 50%|█████ | 262/520 [16:27<16:23, 3.81s/it] {'loss': 1.2325, 'grad_norm': 0.002014595542945627, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 51%|█████ | 263/520 [16:31<16:23, 3.83s/it] {'loss': 1.3493, 'grad_norm': 0.0020266878293558072, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 51%|█████ | 264/520 [16:35<16:27, 3.86s/it] {'loss': 1.3514, 'grad_norm': 0.001954599551847743, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 51%|█████ | 265/520 [16:39<16:24, 3.86s/it] {'loss': 1.2447, 'grad_norm': 0.002188772800682326, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 51%|█████ | 266/520 [16:43<16:20, 3.86s/it] {'loss': 1.1057, 'grad_norm': 0.0017334528239022473, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 51%|█████▏ | 267/520 [16:47<16:17, 3.86s/it] {'loss': 1.2473, 'grad_norm': 0.0018558214568141235, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 52%|█████▏ | 268/520 [16:51<16:14, 3.87s/it] {'loss': 1.4701, 'grad_norm': 0.0028725447597716734, 'learning_rate': 0.1, 'epoch': 0.52}
+ 52%|█████▏ | 269/520 [16:54<16:10, 3.87s/it] {'loss': 1.3632, 'grad_norm': 0.0020745725749862208, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 52%|█████▏ | 270/520 [16:58<16:08, 3.87s/it] {'loss': 1.2718, 'grad_norm': 0.0019367931351395129, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 52%|█████▏ | 271/520 [17:02<16:04, 3.87s/it] {'loss': 1.3526, 'grad_norm': 0.0019768786432918894, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 52%|█████▏ | 272/520 [17:06<16:03, 3.89s/it] {'loss': 1.28, 'grad_norm': 0.0020692387305375646, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 52%|█████▎ | 273/520 [17:10<15:58, 3.88s/it] {'loss': 1.4201, 'grad_norm': 0.0022615649093597912, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
+ 53%|█████▎ | 274/520 [17:14<15:55, 3.89s/it] {'loss': 1.304, 'grad_norm': 0.0022963075712658194, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 53%|█████▎ | 275/520 [17:18<15:51, 3.88s/it] {'loss': 1.2487, 'grad_norm': 0.002115260255241062, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 53%|█████▎ | 276/520 [17:22<15:46, 3.88s/it] {'loss': 1.3303, 'grad_norm': 0.0021858687533456954, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 53%|█████▎ | 277/520 [17:25<15:42, 3.88s/it] {'loss': 1.4012, 'grad_norm': 0.0019205488696924647, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 53%|█████▎ | 278/520 [17:29<15:38, 3.88s/it] {'loss': 1.1981, 'grad_norm': 0.0018579603214232976, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 54%|█████▎ | 279/520 [17:33<15:36, 3.89s/it] {'loss': 1.296, 'grad_norm': 0.0022690383576180156, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 54%|█████▍ | 280/520 [17:37<15:33, 3.89s/it] {'loss': 1.2554, 'grad_norm': 0.0022991016405343265, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 54%|█████▍ | 281/520 [17:41<15:28, 3.89s/it] {'loss': 1.3596, 'grad_norm': 0.0021382125175121256, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 54%|█████▍ | 282/520 [17:45<15:23, 3.88s/it] {'loss': 1.2124, 'grad_norm': 0.0018094812290353902, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 54%|█████▍ | 283/520 [17:49<15:19, 3.88s/it] {'loss': 1.3843, 'grad_norm': 0.0022164245346728484, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
+ 55%|█████▍ | 284/520 [17:53<15:14, 3.87s/it] {'loss': 1.2722, 'grad_norm': 0.0021109125958655404, 'learning_rate': 0.09004321534041836, 'epoch': 0.55}
+ 55%|█████▍ | 285/520 [17:56<14:55, 3.81s/it] {'loss': 1.2405, 'grad_norm': 0.0020094428699905915, 'learning_rate': 0.08942317838840624, 'epoch': 0.55}
+ 55%|█████▌ | 286/520 [18:00<14:42, 3.77s/it] {'loss': 1.1095, 'grad_norm': 0.002313525598738688, 'learning_rate': 0.08880355238966922, 'epoch': 0.55}
+ 55%|█████▌ | 287/520 [18:04<14:35, 3.76s/it] {'loss': 1.3517, 'grad_norm': 0.002078526194850874, 'learning_rate': 0.08818436141924073, 'epoch': 0.55}
+ 55%|█████▌ | 288/520 [18:07<14:23, 3.72s/it] {'loss': 1.392, 'grad_norm': 0.0019534045386602004, 'learning_rate': 0.08756562953525152, 'epoch': 0.55}
+ 56%|█████▌ | 289/520 [18:11<14:15, 3.70s/it] {'loss': 1.249, 'grad_norm': 0.0018630205295434273, 'learning_rate': 0.08694738077799487, 'epoch': 0.56}
+ 56%|█████▌ | 290/520 [18:15<14:09, 3.69s/it] {'loss': 1.1723, 'grad_norm': 0.0017787338512791212, 'learning_rate': 0.08632963916899268, 'epoch': 0.56}
+ 56%|█████▌ | 291/520 [18:18<14:06, 3.70s/it] {'loss': 1.2355, 'grad_norm': 0.0020680790148304953, 'learning_rate': 0.08571242871006202, 'epoch': 0.56}
+ 56%|█████▌ | 292/520 [18:22<13:58, 3.68s/it] {'loss': 1.2847, 'grad_norm': 0.0018973253973469807, 'learning_rate': 0.08509577338238256, 'epoch': 0.56}
+ 56%|█████▋ | 293/520 [18:26<13:55, 3.68s/it] {'loss': 1.225, 'grad_norm': 0.002153404297250794, 'learning_rate': 0.08447969714556484, 'epoch': 0.56}
+ 57%|█████▋ | 294/520 [18:29<13:53, 3.69s/it] {'loss': 1.2504, 'grad_norm': 0.002180212006331943, 'learning_rate': 0.08386422393671933, 'epoch': 0.57}
+ 57%|█████▋ | 295/520 [18:33<13:47, 3.68s/it] {'loss': 1.3365, 'grad_norm': 0.00213024383614413, 'learning_rate': 0.08324937766952638, 'epoch': 0.57}
+ 57%|█████▋ | 296/520 [18:37<13:42, 3.67s/it] {'loss': 1.1923, 'grad_norm': 0.0020121114585607085, 'learning_rate': 0.08263518223330697, 'epoch': 0.57}
+ 57%|█████▋ | 297/520 [18:40<13:37, 3.67s/it] {'loss': 1.3232, 'grad_norm': 0.0021392069544408448, 'learning_rate': 0.08202166149209474, 'epoch': 0.57}
+ 57%|█████▋ | 298/520 [18:44<13:35, 3.67s/it] {'loss': 1.2889, 'grad_norm': 0.0017189749798397758, 'learning_rate': 0.08140883928370855, 'epoch': 0.57}
+ 57%|█████▊ | 299/520 [18:48<13:31, 3.67s/it] {'loss': 1.3489, 'grad_norm': 0.001860329219053845, 'learning_rate': 0.0807967394188264, 'epoch': 0.57}
+ 58%|█████▊ | 300/520 [18:51<13:31, 3.69s/it] {'loss': 1.35, 'grad_norm': 0.002003749994803881, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 58%|█████▊ | 301/520 [18:55<13:27, 3.69s/it] {'loss': 1.3162, 'grad_norm': 0.0019572566723848484, 'learning_rate': 0.07957480182103199, 'epoch': 0.58}
+ 58%|█████▊ | 302/520 [18:59<13:23, 3.69s/it] {'loss': 1.3701, 'grad_norm': 0.0019641623943077546, 'learning_rate': 0.07896501156545044, 'epoch': 0.58}
+ 58%|█████▊ | 303/520 [19:02<13:18, 3.68s/it] {'loss': 1.2453, 'grad_norm': 0.0021593446713797336, 'learning_rate': 0.07835603860618973, 'epoch': 0.58}
+ 58%|█████▊ | 304/520 [19:06<13:16, 3.69s/it] {'loss': 1.2664, 'grad_norm': 0.0021575234064066568, 'learning_rate': 0.07774790660436857, 'epoch': 0.58}
+ 59%|█████▊ | 305/520 [19:10<13:09, 3.67s/it] {'loss': 1.3581, 'grad_norm': 0.002262176955394668, 'learning_rate': 0.07714063918843106, 'epoch': 0.59}
+ 59%|█████▉ | 306/520 [19:14<13:21, 3.74s/it] {'loss': 1.2986, 'grad_norm': 0.0019493400213109273, 'learning_rate': 0.0765342599532285, 'epoch': 0.59}
+ 59%|█████▉ | 307/520 [19:18<13:54, 3.92s/it] {'loss': 1.2328, 'grad_norm': 0.001826940347294174, 'learning_rate': 0.07592879245910272, 'epoch': 0.59}
+ 59%|█████▉ | 308/520 [19:22<13:48, 3.91s/it] {'loss': 1.3477, 'grad_norm': 0.0020102624006824526, 'learning_rate': 0.07532426023097064, 'epoch': 0.59}
+ 59%|█████▉ | 309/520 [19:26<13:42, 3.90s/it] {'loss': 1.2304, 'grad_norm': 0.0018457837973905012, 'learning_rate': 0.07472068675741024, 'epoch': 0.59}
+ 60%|█████▉ | 310/520 [19:30<13:38, 3.90s/it] {'loss': 1.2071, 'grad_norm': 0.001994894680786579, 'learning_rate': 0.07411809548974792, 'epoch': 0.6}
+ 60%|█████▉ | 311/520 [19:34<13:32, 3.89s/it] {'loss': 1.1798, 'grad_norm': 0.0019711143754231487, 'learning_rate': 0.07351650984114727, 'epoch': 0.6}
+ 60%|██████ | 312/520 [19:37<13:27, 3.88s/it] {'loss': 1.172, 'grad_norm': 0.002136467267925463, 'learning_rate': 0.0729159531856995, 'epoch': 0.6}
+ 60%|██████ | 313/520 [19:41<13:21, 3.87s/it] {'loss': 1.1631, 'grad_norm': 0.0018023673484234464, 'learning_rate': 0.07231644885751508, 'epoch': 0.6}
+ 60%|██████ | 314/520 [19:46<13:41, 3.99s/it] {'loss': 1.2025, 'grad_norm': 0.001773479520919837, 'learning_rate': 0.07171802014981725, 'epoch': 0.6}
+ 61%|██████ | 315/520 [19:49<13:28, 3.94s/it] {'loss': 1.3322, 'grad_norm': 0.0024301718490895453, 'learning_rate': 0.07112069031403703, 'epoch': 0.61}
+ 61%|██████ | 316/520 [19:54<13:49, 4.07s/it] {'loss': 1.1726, 'grad_norm': 0.0023548295446356236, 'learning_rate': 0.07052448255890957, 'epoch': 0.61}
+ 61%|██████ | 317/520 [19:58<13:33, 4.01s/it] {'loss': 1.1948, 'grad_norm': 0.0017853542936820074, 'learning_rate': 0.0699294200495727, 'epoch': 0.61}
+ 61%|██████ | 318/520 [20:02<13:20, 3.97s/it] {'loss': 1.3162, 'grad_norm': 0.002106321534146669, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 61%|██████▏ | 319/520 [20:06<13:30, 4.03s/it] {'loss': 1.185, 'grad_norm': 0.0019615497921406898, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 62%|██████▏ | 320/520 [20:10<13:16, 3.98s/it] {'loss': 1.1233, 'grad_norm': 0.002121649289776954, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 62%|██████▏ | 321/520 [20:13<13:06, 3.95s/it] {'loss': 1.3283, 'grad_norm': 0.0020715470154343716, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 62%|██████▏ | 322/520 [20:17<12:57, 3.93s/it] {'loss': 1.1908, 'grad_norm': 0.0020544872552123927, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 62%|██████▏ | 323/520 [20:21<12:50, 3.91s/it] {'loss': 1.2694, 'grad_norm': 0.002021637194300724, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 62%|██████▏ | 324/520 [20:25<12:43, 3.90s/it] {'loss': 1.2584, 'grad_norm': 0.0020632760857690873, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 62%|██████▎ | 325/520 [20:29<12:37, 3.89s/it] {'loss': 1.2745, 'grad_norm': 0.002111187188576593, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 63%|██████▎ | 326/520 [20:33<12:31, 3.87s/it] {'loss': 1.2568, 'grad_norm': 0.001955154912726498, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 63%|██████▎ | 327/520 [20:37<12:26, 3.87s/it] {'loss': 1.3346, 'grad_norm': 0.0021337648161938866, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 63%|██████▎ | 328/520 [20:40<12:20, 3.86s/it] {'loss': 1.3222, 'grad_norm': 0.002064184533927525, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 63%|██████▎ | 329/520 [20:44<12:17, 3.86s/it] {'loss': 1.1744, 'grad_norm': 0.0017117092530173554, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 63%|██████▎ | 330/520 [20:48<12:12, 3.85s/it] {'loss': 1.2521, 'grad_norm': 0.0017560268645123187, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 64%|██████▎ | 331/520 [20:52<12:08, 3.85s/it] {'loss': 1.2108, 'grad_norm': 0.0018740372300613962, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 64%|██████▍ | 332/520 [20:56<12:07, 3.87s/it] {'loss': 1.3387, 'grad_norm': 0.0019001936615010027, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 64%|██████▍ | 333/520 [21:00<12:03, 3.87s/it] {'loss': 1.3707, 'grad_norm': 0.002071945662969299, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 64%|██████▍ | 334/520 [21:04<12:00, 3.87s/it] {'loss': 1.2593, 'grad_norm': 0.0022210476035876784, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 64%|██████▍ | 335/520 [21:07<11:43, 3.80s/it] {'loss': 1.2574, 'grad_norm': 0.001744074841703049, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 65%|██████▍ | 336/520 [21:11<11:31, 3.76s/it] {'loss': 1.1482, 'grad_norm': 0.0021241571514120294, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 65%|██████▍ | 337/520 [21:15<11:22, 3.73s/it] {'loss': 1.1412, 'grad_norm': 0.0019425827364599533, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 65%|██████▌ | 338/520 [21:18<11:14, 3.71s/it] {'loss': 1.2714, 'grad_norm': 0.0019288204304793663, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 65%|██████▌ | 339/520 [21:22<11:08, 3.69s/it] {'loss': 1.2099, 'grad_norm': 0.0019273164568554242, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 65%|██████▌ | 340/520 [21:26<11:04, 3.69s/it] {'loss': 1.2033, 'grad_norm': 0.002006491061991154, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 66%|██████▌ | 341/520 [21:29<10:58, 3.68s/it] {'loss': 1.2164, 'grad_norm': 0.0020417315750213367, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 66%|██████▌ | 342/520 [21:33<10:51, 3.66s/it] {'loss': 1.3066, 'grad_norm': 0.0023941620634907934, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 66%|██████▌ | 343/520 [21:37<10:48, 3.67s/it] {'loss': 1.2757, 'grad_norm': 0.0019516720948067861, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 66%|██████▌ | 344/520 [21:40<10:44, 3.66s/it] {'loss': 1.1705, 'grad_norm': 0.0019550707743265757, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
+ 66%|██████▋ | 345/520 [21:44<10:40, 3.66s/it] {'loss': 1.2914, 'grad_norm': 0.0021782603905184965, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 67%|██████▋ | 346/520 [21:48<10:36, 3.66s/it] {'loss': 1.2701, 'grad_norm': 0.0018318380737262342, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 67%|██████▋ | 347/520 [21:51<10:32, 3.65s/it] {'loss': 1.1898, 'grad_norm': 0.0018144397674571954, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 67%|██████▋ | 348/520 [21:55<10:29, 3.66s/it] {'loss': 1.1452, 'grad_norm': 0.002268603176214715, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 67%|██████▋ | 349/520 [21:58<10:24, 3.65s/it] {'loss': 1.193, 'grad_norm': 0.002040327489041712, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 67%|██████▋ | 350/520 [22:02<10:20, 3.65s/it] {'loss': 1.2335, 'grad_norm': 0.002012845087144897, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 68%|██████▊ | 351/520 [22:06<10:20, 3.67s/it] {'loss': 1.1364, 'grad_norm': 0.0017447581922408883, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 68%|██████▊ | 352/520 [22:10<10:17, 3.68s/it] {'loss': 1.2617, 'grad_norm': 0.0018057297185402853, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 68%|██████▊ | 353/520 [22:13<10:15, 3.68s/it] {'loss': 1.2171, 'grad_norm': 0.0016214839855014475, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 68%|██████▊ | 354/520 [22:17<10:13, 3.69s/it] {'loss': 1.3573, 'grad_norm': 0.001928556355005755, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 68%|██████▊ | 355/520 [22:21<10:07, 3.68s/it] {'loss': 1.2007, 'grad_norm': 0.0019153819301667404, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 68%|██████▊ | 356/520 [22:24<10:02, 3.67s/it] {'loss': 1.1975, 'grad_norm': 0.0019185145258541465, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 69%|██████▊ | 357/520 [22:28<09:57, 3.66s/it] {'loss': 1.2247, 'grad_norm': 0.001763058216852203, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 69%|██████▉ | 358/520 [22:32<09:53, 3.66s/it] {'loss': 1.1562, 'grad_norm': 0.0018940835223277585, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 69%|██████▉ | 359/520 [22:35<09:51, 3.67s/it] {'loss': 1.2821, 'grad_norm': 0.0020796163436142675, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 69%|██████▉ | 360/520 [22:39<09:47, 3.67s/it] {'loss': 1.3124, 'grad_norm': 0.0021517442732386684, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 69%|██████▉ | 361/520 [22:43<09:46, 3.69s/it] {'loss': 1.2935, 'grad_norm': 0.00177596289865426, 'learning_rate': 0.04522281607821288, 'epoch': 0.69}
+ 70%|██████▉ | 362/520 [22:46<09:43, 3.69s/it] {'loss': 1.2192, 'grad_norm': 0.0023973022451422993, 'learning_rate': 0.04470238686320606, 'epoch': 0.7}
+ 70%|██████▉ | 363/520 [22:50<09:39, 3.69s/it] {'loss': 1.2358, 'grad_norm': 0.0018364944127009925, 'learning_rate': 0.044184106189231624, 'epoch': 0.7}
+ 70%|███████ | 364/520 [22:54<09:36, 3.70s/it] {'loss': 1.3115, 'grad_norm': 0.0018859130959935847, 'learning_rate': 0.043667994193637795, 'epoch': 0.7}
+ 70%|███████ | 365/520 [22:57<09:31, 3.69s/it] {'loss': 1.3055, 'grad_norm': 0.002026557160125754, 'learning_rate': 0.043154070929510784, 'epoch': 0.7}
+ 70%|███████ | 366/520 [23:01<09:26, 3.68s/it] {'loss': 1.2499, 'grad_norm': 0.001858150089215726, 'learning_rate': 0.04264235636489542, 'epoch': 0.7}
+ 71%|███████ | 367/520 [23:05<09:23, 3.68s/it] {'loss': 1.2484, 'grad_norm': 0.001879439327207045, 'learning_rate': 0.04213287038201943, 'epoch': 0.71}
+ 71%|███████ | 368/520 [23:08<09:17, 3.67s/it] {'loss': 1.1036, 'grad_norm': 0.001980133724798936, 'learning_rate': 0.04162563277652104, 'epoch': 0.71}
+ 71%|███████ | 369/520 [23:12<09:13, 3.66s/it] {'loss': 1.2622, 'grad_norm': 0.0017753372611588288, 'learning_rate': 0.04112066325667954, 'epoch': 0.71}
+ 71%|███████ | 370/520 [23:16<09:10, 3.67s/it] {'loss': 1.1652, 'grad_norm': 0.0017374561695893932, 'learning_rate': 0.04061798144264986, 'epoch': 0.71}
+ 71%|███████▏ | 371/520 [23:19<09:08, 3.68s/it] {'loss': 1.1617, 'grad_norm': 0.0019526763101057444, 'learning_rate': 0.04011760686569998, 'epoch': 0.71}
+ 72%|███████▏ | 372/520 [23:23<09:11, 3.73s/it] {'loss': 1.3595, 'grad_norm': 0.001751861844797334, 'learning_rate': 0.03961955896745224, 'epoch': 0.72}
+ 72%|███████▏ | 373/520 [23:27<09:13, 3.77s/it] {'loss': 1.2384, 'grad_norm': 0.00201117132440087, 'learning_rate': 0.03912385709912794, 'epoch': 0.72}
+ 72%|███████▏ | 374/520 [23:31<09:13, 3.79s/it] {'loss': 1.2443, 'grad_norm': 0.0018367935517434367, 'learning_rate': 0.038630520520795276, 'epoch': 0.72}
+ 72%|███████▏ | 375/520 [23:35<09:12, 3.81s/it] {'loss': 1.1616, 'grad_norm': 0.0019279624246725869, 'learning_rate': 0.03813956840062119, 'epoch': 0.72}
+ 72%|███████▏ | 376/520 [23:39<09:09, 3.81s/it] {'loss': 1.2799, 'grad_norm': 0.001743707601290526, 'learning_rate': 0.037651019814126656, 'epoch': 0.72}
+ 72%|███████▎ | 377/520 [23:42<09:06, 3.82s/it] {'loss': 1.2169, 'grad_norm': 0.0019946192200351034, 'learning_rate': 0.037164893743445275, 'epoch': 0.72}
+ 73%|███████▎ | 378/520 [23:46<09:05, 3.84s/it] {'loss': 1.2666, 'grad_norm': 0.0018540425380794792, 'learning_rate': 0.03668120907658603, 'epoch': 0.73}
+ 73%|███████▎ | 379/520 [23:50<09:01, 3.84s/it] {'loss': 1.2518, 'grad_norm': 0.0017594991931363776, 'learning_rate': 0.036199984606699154, 'epoch': 0.73}
+ 73%|███████▎ | 380/520 [23:54<08:57, 3.84s/it] {'loss': 1.3276, 'grad_norm': 0.0020508766090486883, 'learning_rate': 0.035721239031346066, 'epoch': 0.73}
+ 73%|███████▎ | 381/520 [23:58<08:53, 3.84s/it] {'loss': 1.2474, 'grad_norm': 0.0018335170922975774, 'learning_rate': 0.03524499095177297, 'epoch': 0.73}
+ 73%|███████▎ | 382/520 [24:02<08:53, 3.86s/it] {'loss': 1.2775, 'grad_norm': 0.0018583133407646722, 'learning_rate': 0.03477125887218792, 'epoch': 0.73}
+ 74%|███████▎ | 383/520 [24:06<08:45, 3.84s/it] {'loss': 1.0856, 'grad_norm': 0.001982726435194079, 'learning_rate': 0.03430006119904196, 'epoch': 0.74}
+ 74%|███████▍ | 384/520 [24:09<08:34, 3.78s/it] {'loss': 1.365, 'grad_norm': 0.0019495860619819993, 'learning_rate': 0.033831416240314084, 'epoch': 0.74}
+ 74%|███████▍ | 385/520 [24:13<08:26, 3.75s/it] {'loss': 1.2256, 'grad_norm': 0.0017869459395268646, 'learning_rate': 0.03336534220479961, 'epoch': 0.74}
+ 74%|███████▍ | 386/520 [24:17<08:20, 3.74s/it] {'loss': 1.1768, 'grad_norm': 0.0016445164168764823, 'learning_rate': 0.032901857201403005, 'epoch': 0.74}
+ 74%|███████▍ | 387/520 [24:20<08:15, 3.73s/it] {'loss': 1.3554, 'grad_norm': 0.0018379871597894095, 'learning_rate': 0.032440979238433976, 'epoch': 0.74}
+ 75%|███████▍ | 388/520 [24:24<08:09, 3.71s/it] {'loss': 1.1229, 'grad_norm': 0.0017318532876175775, 'learning_rate': 0.03198272622290804, 'epoch': 0.75}
+ 75%|███████▍ | 389/520 [24:28<08:05, 3.71s/it] {'loss': 1.1836, 'grad_norm': 0.002182165893031877, 'learning_rate': 0.03152711595985065, 'epoch': 0.75}
+ 75%|███████▌ | 390/520 [24:31<08:03, 3.72s/it] {'loss': 1.2488, 'grad_norm': 0.0017435129651833991, 'learning_rate': 0.031074166151605298, 'epoch': 0.75}
+ 75%|███████▌ | 391/520 [24:35<08:07, 3.78s/it] {'loss': 1.3276, 'grad_norm': 0.0019040032136771678, 'learning_rate': 0.030623894397145836, 'epoch': 0.75}
+ 75%|███████▌ | 392/520 [24:39<08:07, 3.81s/it] {'loss': 1.1329, 'grad_norm': 0.0017841728113707485, 'learning_rate': 0.03017631819139273, 'epoch': 0.75}
+ 76%|███████▌ | 393/520 [24:43<07:59, 3.78s/it] {'loss': 1.1694, 'grad_norm': 0.0016213752658539359, 'learning_rate': 0.029731454924533086, 'epoch': 0.76}
+ 76%|███████▌ | 394/520 [24:47<07:52, 3.75s/it] {'loss': 1.194, 'grad_norm': 0.002009475274964781, 'learning_rate': 0.029289321881345254, 'epoch': 0.76}
+ 76%|███████▌ | 395/520 [24:50<07:45, 3.73s/it] {'loss': 1.1606, 'grad_norm': 0.002084657480014317, 'learning_rate': 0.028849936240527008, 'epoch': 0.76}
+ 76%|███████▌ | 396/520 [24:54<07:39, 3.71s/it] {'loss': 1.2473, 'grad_norm': 0.0019527714904277673, 'learning_rate': 0.028413315074028157, 'epoch': 0.76}
+ 76%|███████▋ | 397/520 [24:58<07:35, 3.70s/it] {'loss': 1.2302, 'grad_norm': 0.0017787571902053358, 'learning_rate': 0.027979475346387363, 'epoch': 0.76}
+ 77%|███████▋ | 398/520 [25:01<07:29, 3.69s/it] {'loss': 1.2245, 'grad_norm': 0.0018733802727334242, 'learning_rate': 0.027548433914072735, 'epoch': 0.77}
+ 77%|███████▋ | 399/520 [25:05<07:26, 3.69s/it] {'loss': 1.2141, 'grad_norm': 0.0018136213314886695, 'learning_rate': 0.027120207524827168, 'epoch': 0.77}
+ 77%|███████▋ | 400/520 [25:09<07:23, 3.70s/it] {'loss': 1.2645, 'grad_norm': 0.0019020152164516203, 'learning_rate': 0.02669481281701739, 'epoch': 0.77}
+ 77%|███████▋ | 401/520 [25:12<07:19, 3.69s/it] {'loss': 1.0473, 'grad_norm': 0.0019341152756506457, 'learning_rate': 0.026272266318987603, 'epoch': 0.77}
+ 77%|███████▋ | 402/520 [25:16<07:15, 3.69s/it] {'loss': 1.1724, 'grad_norm': 0.002037017515133387, 'learning_rate': 0.02585258444841733, 'epoch': 0.77}
+ 78%|███████▊ | 403/520 [25:20<07:12, 3.69s/it] {'loss': 1.2046, 'grad_norm': 0.0020771105616557758, 'learning_rate': 0.025435783511683442, 'epoch': 0.78}
+ 78%|███████▊ | 404/520 [25:23<07:06, 3.68s/it] {'loss': 1.1101, 'grad_norm': 0.0023206368270777245, 'learning_rate': 0.02502187970322657, 'epoch': 0.78}
+ 78%|███████▊ | 405/520 [25:27<07:03, 3.69s/it] {'loss': 1.2202, 'grad_norm': 0.0017979765000070615, 'learning_rate': 0.02461088910492202, 'epoch': 0.78}
+ 78%|███████▊ | 406/520 [25:31<07:00, 3.68s/it] {'loss': 1.1523, 'grad_norm': 0.002169463446411177, 'learning_rate': 0.02420282768545469, 'epoch': 0.78}
+ 78%|███████▊ | 407/520 [25:34<06:56, 3.68s/it] {'loss': 1.2979, 'grad_norm': 0.0018752697944585265, 'learning_rate': 0.02379771129969892, 'epoch': 0.78}
+ 78%|███████▊ | 408/520 [25:38<06:51, 3.67s/it] {'loss': 1.185, 'grad_norm': 0.001962220479325609, 'learning_rate': 0.023395555688102213, 'epoch': 0.78}
+ 79%|███████▊ | 409/520 [25:42<06:47, 3.67s/it] {'loss': 1.318, 'grad_norm': 0.002111796761940862, 'learning_rate': 0.02299637647607372, 'epoch': 0.79}
+ 79%|███████▉ | 410/520 [25:45<06:43, 3.67s/it] {'loss': 1.0336, 'grad_norm': 0.0018621295216597797, 'learning_rate': 0.022600189173377264, 'epoch': 0.79}
+ 79%|███████▉ | 411/520 [25:49<06:39, 3.67s/it] {'loss': 1.2906, 'grad_norm': 0.0021321760568351997, 'learning_rate': 0.022207009173528525, 'epoch': 0.79}
+ 79%|███████▉ | 412/520 [25:53<06:35, 3.66s/it] {'loss': 1.2042, 'grad_norm': 0.0018538058676521847, 'learning_rate': 0.02181685175319702, 'epoch': 0.79}
+ 79%|███████▉ | 413/520 [25:56<06:32, 3.67s/it] {'loss': 1.2537, 'grad_norm': 0.002141426182756747, 'learning_rate': 0.021429732071612653, 'epoch': 0.79}
+ 80%|███████▉ | 414/520 [26:00<06:29, 3.67s/it] {'loss': 1.0462, 'grad_norm': 0.0016408718672433049, 'learning_rate': 0.02104566516997647, 'epoch': 0.8}
+ 80%|███████▉ | 415/520 [26:04<06:25, 3.67s/it] {'loss': 1.1759, 'grad_norm': 0.001887706434444241, 'learning_rate': 0.020664665970876496, 'epoch': 0.8}
+ 80%|████████ | 416/520 [26:08<06:22, 3.67s/it] {'loss': 1.0958, 'grad_norm': 0.0021095052181244775, 'learning_rate': 0.020286749277707784, 'epoch': 0.8}
+ 80%|████████ | 417/520 [26:11<06:18, 3.67s/it] {'loss': 1.2629, 'grad_norm': 0.002121267166372948, 'learning_rate': 0.019911929774097215, 'epoch': 0.8}
+ 80%|████████ | 418/520 [26:15<06:14, 3.67s/it] {'loss': 1.2451, 'grad_norm': 0.0018713915458864379, 'learning_rate': 0.019540222023333165, 'epoch': 0.8}
+ 81%|████████ | 419/520 [26:18<06:09, 3.66s/it] {'loss': 1.2348, 'grad_norm': 0.002023362124254539, 'learning_rate': 0.01917164046779948, 'epoch': 0.81}
+ 81%|████████ | 420/520 [26:22<06:05, 3.66s/it] {'loss': 1.1246, 'grad_norm': 0.0020020297927216585, 'learning_rate': 0.018806199428414352, 'epoch': 0.81}
+ 81%|████████ | 421/520 [26:26<06:02, 3.67s/it] {'loss': 1.0535, 'grad_norm': 0.002034558405875443, 'learning_rate': 0.018443913104073985, 'epoch': 0.81}
+ 81%|████████ | 422/520 [26:29<05:58, 3.66s/it] {'loss': 1.1774, 'grad_norm': 0.0020001422279122038, 'learning_rate': 0.01808479557110081, 'epoch': 0.81}
+ 81%|████████▏ | 423/520 [26:33<05:54, 3.65s/it] {'loss': 1.1661, 'grad_norm': 0.0021984097741718296, 'learning_rate': 0.017728860782696667, 'epoch': 0.81}
+ 82%|████████▏ | 424/520 [26:37<05:51, 3.66s/it] {'loss': 1.3312, 'grad_norm': 0.002023640516453171, 'learning_rate': 0.017376122568400532, 'epoch': 0.82}
+ 82%|████████▏ | 425/520 [26:40<05:47, 3.66s/it] {'loss': 1.1695, 'grad_norm': 0.0018149753958320279, 'learning_rate': 0.017026594633551252, 'epoch': 0.82}
+ 82%|████████▏ | 426/520 [26:44<05:42, 3.65s/it] {'loss': 1.1909, 'grad_norm': 0.002485576094678523, 'learning_rate': 0.01668029055875512, 'epoch': 0.82}
+ 82%|████████▏ | 427/520 [26:48<05:42, 3.69s/it] {'loss': 1.1057, 'grad_norm': 0.0018305050857735895, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 82%|████████▏ | 428/520 [26:52<05:45, 3.76s/it] {'loss': 1.0867, 'grad_norm': 0.001898191962112564, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 82%|████████▎ | 429/520 [26:56<05:46, 3.81s/it] {'loss': 1.1832, 'grad_norm': 0.0017984169406029926, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:56<05:46, 3.81s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:00<05:45, 3.83s/it] {'loss': 1.178, 'grad_norm': 0.0017135756663253566, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:00<05:45, 3.83s/it] 83%|████████▎ | 431/520 [27:03<05:43, 3.86s/it] {'loss': 1.2169, 'grad_norm': 0.001957234319702304, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:03<05:43, 3.86s/it] 83%|████████▎ | 432/520 [27:07<05:40, 3.87s/it] {'loss': 1.0908, 'grad_norm': 0.0020391881201510973, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:07<05:40, 3.87s/it] 83%|████████▎ | 433/520 [27:11<05:37, 3.87s/it] {'loss': 1.226, 'grad_norm': 0.0018887919086711018, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:11<05:37, 3.87s/it] 83%|████████▎ | 434/520 [27:15<05:34, 3.89s/it] {'loss': 0.969, 'grad_norm': 0.0019064380172681436, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:15<05:34, 3.89s/it] 84%|████████▎ | 435/520 [27:19<05:30, 3.89s/it] {'loss': 1.2652, 'grad_norm': 0.002238712533495057, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:19<05:30, 3.89s/it] 84%|████████▍ | 436/520 [27:23<05:27, 3.90s/it] {'loss': 1.0534, 'grad_norm': 0.0019950195314299276, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:23<05:27, 3.90s/it] 84%|████████▍ | 437/520 [27:27<05:23, 3.90s/it] {'loss': 1.2891, 'grad_norm': 0.0018924057265140685, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:27<05:23, 3.90s/it] 84%|████████▍ | 438/520 [27:31<05:20, 3.91s/it] {'loss': 1.0955, 'grad_norm': 0.0018164965933731782, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:31<05:20, 3.91s/it] 84%|████████▍ | 439/520 [27:35<05:16, 3.91s/it] {'loss': 1.1831, 'grad_norm': 0.0015785840339118155, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:35<05:16, 3.91s/it] 85%|████████▍ | 440/520 [27:39<05:12, 3.90s/it] {'loss': 1.1424, 'grad_norm': 0.001861755963475495, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:39<05:12, 3.90s/it] 85%|████████▍ | 441/520 [27:43<05:08, 3.90s/it] {'loss': 1.2021, 'grad_norm': 0.001807235794534003, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:43<05:08, 3.90s/it] 85%|████████▌ | 442/520 [27:46<05:04, 3.90s/it] {'loss': 1.2028, 'grad_norm': 0.002142423767585661, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:46<05:04, 3.90s/it] 85%|████████▌ | 443/520 [27:50<05:00, 3.91s/it] {'loss': 1.2164, 'grad_norm': 0.001908081938927341, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:50<05:00, 3.91s/it] 85%|████████▌ | 444/520 [27:54<04:56, 3.90s/it] {'loss': 1.1805, 'grad_norm': 0.0017093320094552378, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:54<04:56, 3.90s/it] 86%|████████▌ | 445/520 [27:58<04:51, 3.89s/it] {'loss': 1.1055, 'grad_norm': 0.0017910794661580188, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:58<04:51, 3.89s/it] 86%|████████▌ | 
446/520 [28:02<04:48, 3.89s/it] {'loss': 1.2856, 'grad_norm': 0.0017670985436144902, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:02<04:48, 3.89s/it] 86%|████████▌ | 447/520 [28:06<04:44, 3.90s/it] {'loss': 1.1947, 'grad_norm': 0.0018742323885775724, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:06<04:44, 3.90s/it] 86%|████████▌ | 448/520 [28:10<04:41, 3.91s/it] {'loss': 1.1748, 'grad_norm': 0.0019852350157088788, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [30:13<04:41, 3.91s/it] 86%|████████▋ | 449/520 [30:17<48:20, 40.86s/it] {'loss': 1.2426, 'grad_norm': 0.001875307228239057, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [30:17<48:20, 40.86s/it] 87%|████████▋ | 450/520 [30:21<34:42, 29.75s/it] {'loss': 1.2075, 'grad_norm': 0.001903614495168305, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [30:21<34:42, 29.75s/it] 87%|████████▋ | 451/520 [30:24<25:12, 21.92s/it] {'loss': 1.2056, 'grad_norm': 0.0019374096988901601, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [30:24<25:12, 21.92s/it] 87%|████████▋ | 452/520 [30:28<18:37, 16.44s/it] {'loss': 1.2784, 'grad_norm': 0.0017850416844586248, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [30:28<18:37, 16.44s/it] 87%|████████▋ | 453/520 [30:32<14:04, 12.61s/it] {'loss': 1.2573, 'grad_norm': 0.0018047027871373636, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [30:32<14:04, 12.61s/it] 87%|████████▋ | 454/520 [30:35<10:54, 9.92s/it] {'loss': 1.1199, 'grad_norm': 0.001945820036279633, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [30:35<10:54, 9.92s/it] 88%|████████▊ | 455/520 [30:39<08:42, 8.04s/it] {'loss': 1.2555, 'grad_norm': 0.0018280603032622322, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [30:39<08:42, 8.04s/it] 88%|████████▊ | 456/520 [30:43<07:11, 6.75s/it] {'loss': 1.1725, 'grad_norm': 0.0019190774141459323, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [30:43<07:11, 6.75s/it] 88%|████████▊ | 457/520 [30:46<06:06, 5.82s/it] {'loss': 1.1962, 'grad_norm': 0.0017487948025807123, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [30:46<06:06, 5.82s/it] 88%|████████▊ | 458/520 [30:50<05:21, 5.18s/it] {'loss': 1.3176, 'grad_norm': 0.0020701435541899604, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [30:50<05:21, 5.18s/it] 88%|████████▊ | 459/520 [30:54<04:49, 4.74s/it] {'loss': 1.246, 'grad_norm': 0.0018465757358280357, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [30:54<04:49, 4.74s/it] 88%|████████▊ | 460/520 [30:57<04:25, 4.42s/it] {'loss': 1.1223, 'grad_norm': 0.0018871194797971786, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [30:57<04:25, 4.42s/it] 89%|████████▊ | 461/520 [31:01<04:07, 4.20s/it] {'loss': 1.2864, 'grad_norm': 0.0016014752750396172, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [31:01<04:07, 4.20s/it] 89%|████████▉ | 462/520 [31:05<03:53, 4.03s/it] {'loss': 1.3291, 'grad_norm': 0.0018938729883049318, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [31:05<03:53, 4.03s/it] 89%|████████▉ | 463/520 [31:08<03:43, 3.91s/it] {'loss': 1.0777, 'grad_norm': 
0.0019461270715937157, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [31:08<03:43, 3.91s/it] 89%|████████▉ | 464/520 [31:12<03:34, 3.84s/it] {'loss': 1.2264, 'grad_norm': 0.00195309680649393, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [31:12<03:34, 3.84s/it] 89%|████████▉ | 465/520 [31:16<03:28, 3.79s/it] {'loss': 1.3399, 'grad_norm': 0.0021313711418095176, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [31:16<03:28, 3.79s/it] 90%|████████▉ | 466/520 [31:19<03:21, 3.74s/it] {'loss': 1.2057, 'grad_norm': 0.0017097934003887675, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [31:19<03:21, 3.74s/it] 90%|████████▉ | 467/520 [31:23<03:17, 3.72s/it] {'loss': 1.2114, 'grad_norm': 0.00179412510666796, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [31:23<03:17, 3.72s/it] 90%|█████████ | 468/520 [31:27<03:12, 3.69s/it] {'loss': 1.189, 'grad_norm': 0.002077629827668159, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [31:27<03:12, 3.69s/it] 90%|█████████ | 469/520 [31:30<03:07, 3.67s/it] {'loss': 1.2474, 'grad_norm': 0.002001868835633449, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [31:30<03:07, 3.67s/it] 90%|█████████ | 470/520 [31:34<03:05, 3.72s/it] {'loss': 1.1227, 'grad_norm': 0.0016580815244040899, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [31:34<03:05, 3.72s/it] 91%|█████████ | 471/520 [31:38<03:04, 3.76s/it] {'loss': 1.1473, 'grad_norm': 0.0018685997544866534, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [31:38<03:04, 3.76s/it] 91%|█████████ | 472/520 [31:42<03:02, 3.80s/it] {'loss': 1.117, 'grad_norm': 0.0018510035897494766, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [31:42<03:02, 3.80s/it] 91%|█████████ | 473/520 [31:46<02:59, 3.82s/it] {'loss': 1.173, 'grad_norm': 0.0019253028103838046, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [31:46<02:59, 3.82s/it] 91%|█████████ | 474/520 [31:50<02:55, 3.82s/it] {'loss': 1.2527, 'grad_norm': 0.0017374627040667326, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [31:50<02:55, 3.82s/it] 91%|█████████▏| 475/520 [31:53<02:52, 3.84s/it] {'loss': 1.1662, 'grad_norm': 0.0017254843319126955, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [31:53<02:52, 3.84s/it] 92%|█████████▏| 476/520 [31:57<02:49, 3.85s/it] {'loss': 1.1721, 'grad_norm': 0.0019077826390658857, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [31:57<02:49, 3.85s/it] 92%|█████████▏| 477/520 [32:01<02:45, 3.85s/it] {'loss': 1.1576, 'grad_norm': 0.0020949104457182563, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [32:01<02:45, 3.85s/it] 92%|█████████▏| 478/520 [32:05<02:42, 3.87s/it] {'loss': 1.1141, 'grad_norm': 0.0018273342536190775, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [32:05<02:42, 3.87s/it] 92%|█████████▏| 479/520 [32:09<02:39, 3.89s/it] {'loss': 1.2149, 'grad_norm': 0.0019148369646477582, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [32:09<02:39, 3.89s/it] 92%|█████████▏| 480/520 [32:13<02:35, 3.89s/it] {'loss': 1.2492, 'grad_norm': 0.0017698375088976586, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 
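The learning_rate values in these records trace transformers' cosine-with-warmup schedule. A minimal cross-check in Python, assuming the recipe visible in the relaunch command later in this log (peak LR 2e-1, warmup_ratio 0.03, 520 optimizer steps; the helper name is illustrative, not from the repo):

import math

# Cosine schedule with linear warmup, as in transformers'
# get_cosine_schedule_with_warmup. Warmup steps = ceil(0.03 * 520) = 16;
# 520 steps ~= 665k samples * 0.1 train_data_ratio / effective batch of 128
# (8 GPUs * 4 per-device batch * 4 gradient-accumulation steps).
def cosine_lr(step, max_lr=2e-1, warmup=16, total=520):
    if step < warmup:
        return max_lr * step / max(1, warmup)
    progress = (step - warmup) / max(1, total - warmup)
    return max_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(480))  # ~0.00309227, matching the value logged at step 480
print(cosine_lr(520))  # 0.0, matching the final step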
92%|█████████▏| 480/520 [32:13<02:35, 3.89s/it] 92%|█████████▎| 481/520 [32:17<02:31, 3.89s/it] {'loss': 1.2356, 'grad_norm': 0.0017413896081954977, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 92%|█████████▎| 481/520 [32:17<02:31, 3.89s/it] 93%|█████████▎| 482/520 [32:21<02:27, 3.88s/it] {'loss': 1.2611, 'grad_norm': 0.0021655989858511255, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [32:21<02:27, 3.88s/it] 93%|█████████▎| 483/520 [32:25<02:23, 3.87s/it] {'loss': 1.1831, 'grad_norm': 0.0020236674109529638, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [32:25<02:23, 3.87s/it] 93%|█████████▎| 484/520 [32:28<02:19, 3.88s/it] {'loss': 1.1947, 'grad_norm': 0.0018755765542672047, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [32:28<02:19, 3.88s/it] 93%|█████████▎| 485/520 [32:32<02:15, 3.88s/it] {'loss': 1.1382, 'grad_norm': 0.0018063353570280474, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [32:32<02:15, 3.88s/it] 93%|█████████▎| 486/520 [32:36<02:11, 3.88s/it] {'loss': 1.2595, 'grad_norm': 0.0019431091892771948, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [32:36<02:11, 3.88s/it] 94%|█████████▎| 487/520 [32:40<02:07, 3.88s/it] {'loss': 1.1108, 'grad_norm': 0.0017588279022515037, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [32:40<02:07, 3.88s/it] 94%|█████████▍| 488/520 [32:44<02:02, 3.83s/it] {'loss': 1.0577, 'grad_norm': 0.0018893360199667044, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [32:44<02:02, 3.83s/it] 94%|█████████▍| 489/520 [32:47<01:56, 3.77s/it] {'loss': 1.2487, 'grad_norm': 0.0016393788424630795, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [32:47<01:56, 3.77s/it] 94%|█████████▍| 490/520 [32:51<01:52, 3.74s/it] {'loss': 1.1767, 'grad_norm': 0.0019695572517381093, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [32:51<01:52, 3.74s/it] 94%|█████████▍| 491/520 [32:55<01:48, 3.73s/it] {'loss': 1.1371, 'grad_norm': 0.0019028819018814163, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [32:55<01:48, 3.73s/it] 95%|█████████▍| 492/520 [32:58<01:43, 3.71s/it] {'loss': 1.2613, 'grad_norm': 0.0019454328639785467, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [32:58<01:43, 3.71s/it] 95%|█████████▍| 493/520 [33:02<01:39, 3.69s/it] {'loss': 1.2809, 'grad_norm': 0.001944836117600219, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [33:02<01:39, 3.69s/it] 95%|█████████▌| 494/520 [33:06<01:35, 3.69s/it] {'loss': 1.201, 'grad_norm': 0.0017278526162806352, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [33:06<01:35, 3.69s/it] 95%|█████████▌| 495/520 [33:09<01:31, 3.68s/it] {'loss': 1.1533, 'grad_norm': 0.0018371100923492028, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [33:09<01:31, 3.68s/it] 95%|█████████▌| 496/520 [33:13<01:28, 3.68s/it] {'loss': 1.0752, 'grad_norm': 0.0018268623095353982, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [33:13<01:28, 3.68s/it] 96%|█████████▌| 497/520 [33:17<01:25, 3.70s/it] {'loss': 1.1754, 'grad_norm': 0.0016439906418615044, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [33:17<01:25, 3.70s/it] 
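Each {'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...} record above is a literal Python dict printed by the HF Trainer once per logging step, so a run's loss curve can be recovered straight from a raw log like this one. A small sketch (the regex, helper name, and path are illustrative only):

import ast
import re

# Extract the per-step metric dicts that the Trainer prints into the log.
RECORD = re.compile(r"\{'loss':[^}]*\}")

def parse_loss_records(path):
    with open(path) as f:
        return [ast.literal_eval(m.group(0)) for m in RECORD.finditer(f.read())]

records = parse_loss_records("train.log")  # hypothetical path to a log like this
losses = [r["loss"] for r in records]
print(len(records), sum(losses) / len(losses))  # step count and mean train loss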
96%|█████████▌| 498/520 [33:21<01:21, 3.70s/it] {'loss': 1.1588, 'grad_norm': 0.0019386693682386465, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [33:21<01:21, 3.70s/it] 96%|█████████▌| 499/520 [33:24<01:17, 3.70s/it] {'loss': 1.3181, 'grad_norm': 0.002018777473649022, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [33:24<01:17, 3.70s/it] 96%|█████████▌| 500/520 [33:28<01:13, 3.69s/it] {'loss': 1.276, 'grad_norm': 0.0021415835575541472, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [33:28<01:13, 3.69s/it] 96%|█████████▋| 501/520 [33:32<01:09, 3.68s/it] {'loss': 1.2311, 'grad_norm': 0.0021418119984178007, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [33:32<01:09, 3.68s/it] 97%|█████████▋| 502/520 [33:35<01:06, 3.68s/it] {'loss': 1.1973, 'grad_norm': 0.0017415105458187384, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [33:35<01:06, 3.68s/it] 97%|█████████▋| 503/520 [33:39<01:02, 3.67s/it] {'loss': 1.2078, 'grad_norm': 0.0018828928665092302, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [33:39<01:02, 3.67s/it] 97%|█████████▋| 504/520 [33:43<00:58, 3.67s/it] {'loss': 1.2015, 'grad_norm': 0.002397094025070912, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [33:43<00:58, 3.67s/it] 97%|█████████▋| 505/520 [33:46<00:54, 3.67s/it] {'loss': 1.2349, 'grad_norm': 0.0018241466785301299, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [33:46<00:54, 3.67s/it] 97%|█████████▋| 506/520 [33:50<00:51, 3.67s/it] {'loss': 1.1486, 'grad_norm': 0.0019477286211350812, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [33:50<00:51, 3.67s/it] 98%|█████████▊| 507/520 [33:54<00:47, 3.69s/it] {'loss': 1.3601, 'grad_norm': 0.001779062125901195, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [33:54<00:47, 3.69s/it] 98%|█████████▊| 508/520 [33:57<00:44, 3.68s/it] {'loss': 1.2678, 'grad_norm': 0.0018593913126699496, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [33:57<00:44, 3.68s/it] 98%|█████████▊| 509/520 [34:01<00:40, 3.67s/it] {'loss': 1.2371, 'grad_norm': 0.0018266586297638109, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [34:01<00:40, 3.67s/it] 98%|█████████▊| 510/520 [34:05<00:36, 3.67s/it] {'loss': 1.191, 'grad_norm': 0.001826857257626663, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [34:05<00:36, 3.67s/it] 98%|█████████▊| 511/520 [34:08<00:33, 3.68s/it] {'loss': 1.1652, 'grad_norm': 0.001790727122342828, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [34:08<00:33, 3.68s/it] 98%|█████████▊| 512/520 [34:12<00:29, 3.69s/it] {'loss': 1.0447, 'grad_norm': 0.0018730563858997895, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [34:12<00:29, 3.69s/it] 99%|█████████▊| 513/520 [34:16<00:25, 3.69s/it] {'loss': 1.2465, 'grad_norm': 0.0022443362659981163, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [34:16<00:25, 3.69s/it] 99%|█████████▉| 514/520 [34:19<00:22, 3.69s/it] {'loss': 1.2228, 'grad_norm': 0.0018228191867838697, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [34:19<00:22, 3.69s/it] 99%|█████████▉| 515/520 [34:23<00:18, 3.69s/it] {'loss': 
1.267, 'grad_norm': 0.0020737234335585658, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [34:23<00:18, 3.69s/it] 99%|█████████▉| 516/520 [34:27<00:14, 3.70s/it] {'loss': 1.1605, 'grad_norm': 0.0017864866381024315, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [34:27<00:14, 3.70s/it] 99%|█████████▉| 517/520 [34:30<00:11, 3.69s/it] {'loss': 1.2757, 'grad_norm': 0.0019393099927560704, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [34:30<00:11, 3.69s/it] 100%|█████████▉| 518/520 [34:34<00:07, 3.68s/it] {'loss': 1.1829, 'grad_norm': 0.0019710132731526127, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [34:34<00:07, 3.68s/it] 100%|█████████▉| 519/520 [34:38<00:03, 3.68s/it] {'loss': 1.2143, 'grad_norm': 0.0018702158982314538, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [34:38<00:03, 3.68s/it] 100%|██████████| 520/520 [34:42<00:00, 3.93s/it] {'loss': 1.263, 'grad_norm': 0.0018897601096703994, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [34:42<00:00, 3.93s/it] {'train_runtime': 2082.8687, 'train_samples_per_second': 31.941, 'train_steps_per_second': 0.25, 'train_loss': 1.3238919244362757, 'epoch': 1.0} + 100%|██████████| 520/520 [34:42<00:00, 3.93s/it] 100%|██████████| 520/520 [34:42<00:00, 4.01s/it] +[2025-10-13 11:32:08,838] [INFO] [launch.py:348:main] Process 694231 exits successfully. +[2025-10-13 11:32:08,839] [INFO] [launch.py:348:main] Process 694232 exits successfully. +[2025-10-13 11:32:08,839] [INFO] [launch.py:348:main] Process 694229 exits successfully. +[2025-10-13 11:32:08,839] [INFO] [launch.py:348:main] Process 694226 exits successfully. +[2025-10-13 11:32:09,841] [INFO] [launch.py:348:main] Process 694227 exits successfully. +[2025-10-13 11:32:09,841] [INFO] [launch.py:348:main] Process 694228 exits successfully. +[2025-10-13 11:32:09,842] [INFO] [launch.py:348:main] Process 694230 exits successfully. +[2025-10-13 11:32:13,846] [INFO] [launch.py:348:main] Process 694225 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_1.9_2e-1_connector-3.0_1.9_2e-1_ablation_20251013_104850.log +Timestamp: 2025-10-13 11:32:16 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation_20251013_113216.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation_20251013_113216.log new file mode 100644 index 0000000000000000000000000000000000000000..a82cbbeb82c1d5d358ab23f48d2bdea2f0de185c --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation_20251013_113216.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation_20251013_113216.log +Timestamp: 2025-10-13 11:32:16 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 11:32:19,195] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:21,878] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 11:32:21,880] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 2.1 --temperature_mlp_text 2.1 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 2.1 --temperature_mlp_vision 2.1 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 2.1 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-13 11:32:24,527] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:25,560] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 11:32:25,560] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 11:32:25,560] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 11:32:25,560] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 11:32:25,560] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 11:32:25,560] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 11:32:25,560] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 11:32:25,563] [INFO] [launch.py:253:main] process 711967 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,565] [INFO] 
[launch.py:253:main] process 711968 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,567] [INFO] [launch.py:253:main] process 711969 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,569] [INFO] [launch.py:253:main] process 711970 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,571] [INFO] [launch.py:253:main] process 711971 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,574] [INFO] [launch.py:253:main] process 711972 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,576] [INFO] [launch.py:253:main] process 711973 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 11:32:25,578] [INFO] [launch.py:253:main] process 711974 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 11:32:32,195] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,448] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,500] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,501] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,510] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,523] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,524] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,551] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 11:32:32,610] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,860] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,911] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,913] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,928] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,928] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-13 11:32:32,937] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,943] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 11:32:32,964] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.1, 'temperature_mlp': 2.1, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.1, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.1, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.1, + "temperature_mlp": 2.1, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:711967:711967 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:711967:711967 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:711967:711967 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:711967:711967 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:711967:711967 [0] NCCL INFO NET/Plugin: Using internal network plugin. 
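The NCCL lines here show each rank failing to find a net plugin (libnccl-net.so) or an InfiniBand device and falling back to plain TCP sockets on eth0; the messages appear because NCCL_DEBUG is at INFO level. On a single 8-GPU node this typically only matters for cross-node jobs, since intra-node collectives go over NVLink/PCIe P2P or shared memory rather than the socket transport. A quick sanity check of what the job sees (plain PyTorch, nothing TinyLLaVA-specific):

import os
import torch

# Mirror the facts reported in the NCCL INFO lines above.
print(torch.cuda.nccl.version())             # e.g. (2, 21, 5)
print(os.environ.get("NCCL_SOCKET_IFNAME"))  # "eth" in this job
print(torch.cuda.device_count())             # 8 on this node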
+ywang29-vrdb-test1-worker-0:711967:711967 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+[Identical cudaDriverVersion, NCCL_SOCKET_IFNAME, Bootstrap, and NET/Plugin lines from ranks 1-7 (PIDs 711968-711974) omitted.]
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO Using network Socket
+[Matching NET/IB and NET/Socket lines from ranks 1-7 omitted.]
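The NCCL lines above are diagnostics rather than errors: no InfiniBand device and no external net plugin are found, so all eight ranks fall back to plain TCP sockets on eth0. The interface choice comes from the environment. A hedged sketch of the launcher-side settings this corresponds to (the NCCL_SOCKET_IFNAME value is copied from the log; enabling NCCL_DEBUG=INFO is an assumption about how the verbose "NCCL INFO" lines were turned on):

    import os

    os.environ["NCCL_SOCKET_IFNAME"] = "eth"  # pin NCCL's sockets to the eth* interfaces, as in this run
    os.environ["NCCL_DEBUG"] = "INFO"         # emits the "NCCL INFO ..." lines seen throughout this log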
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO ncclCommInitRank comm 0x556ef01974d0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xdfc472f2d7d52d72 - Init START
+[Init START lines for ranks 1-7 of the same communicator (commId 0xdfc472f2d7d52d72) omitted.]
+[CPU-affinity and "NVLS multicast support is not available" lines for GPUs 0-7 omitted.]
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO comm 0x556ef01974d0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+[Matching comm lines for ranks 1-7 omitted.]
+[Tree and ring topology lines omitted: on all 24 channels the ranks form the chain 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7, rank 0 enumerates the 24 ring channels ("Channel 00/24 ... 23/24 : 0 1 2 3 4 5 6 7"), and every rank sets P2P Chunksize to 524288.]
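The Init START and topology lines record one NCCL communicator being formed over all eight local GPUs (nranks 8, nNodes 1), with each rank pinned to its own CUDA device. A minimal sketch of the process-group setup that produces this handshake, assuming a torchrun-style launch with 8 processes on one node:

    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun for each of the 8 processes
    torch.cuda.set_device(local_rank)           # matches the "cudaDev N" in the Init START lines
    dist.init_process_group(backend="nccl")     # the NCCL communicator itself is created lazily,
                                                # on the first collective call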
+[Per-channel connection lines omitted: on each of the 24 channels every rank links to its ring successor (0[0] -> 1[1], 1[1] -> 2[2], ..., 7[7] -> 0[0]) and, for the tree phase, back to its predecessor (7[7] -> 6[6], ..., 1[1] -> 0[0]), all via P2P/CUMEM/read.]
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[Matching "Connected all rings/trees", threadThresholds, and channel-count lines from ranks 1-7 omitted.]
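Once every rank reports "Connected all rings" and "Connected all trees", the communicator is wired up and the first collective will run over these channels. A tiny usage sketch, continuing the process-group setup above:

    # Runs on every rank; sums a one-element tensor across the 8 GPUs.
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)   # default op is SUM, so x is now tensor([8.]) on every rank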
channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:711972:713570 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711974:713582 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711970:713583 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711968:713585 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711974:713582 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711972:713570 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711970:713583 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711968:713585 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711974:713582 [7] NCCL INFO ncclCommInitRank comm 0x55c9ff5f2900 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711972:713570 [5] NCCL INFO ncclCommInitRank comm 0x56478b871700 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711970:713583 [3] NCCL INFO ncclCommInitRank comm 0x560cdb77efe0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711968:713585 [1] NCCL INFO ncclCommInitRank comm 0x555b877c8650 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711971:713568 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711971:713568 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711971:713568 [4] NCCL INFO ncclCommInitRank comm 0x557af2118560 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711967:713563 [0] NCCL INFO ncclCommInitRank comm 0x556ef01974d0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711973:713584 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711973:713584 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:711969:713569 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:711973:713584 [6] NCCL INFO ncclCommInitRank comm 0x55fa6898c800 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +ywang29-vrdb-test1-worker-0:711969:713569 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:711969:713569 [2] NCCL INFO ncclCommInitRank comm 0x55cf674a2830 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xdfc472f2d7d52d72 - Init COMPLETE +[2025-10-13 11:33:15,188] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 12:30:05,881] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
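[Editor's note: one hedged way to read the DeepSpeed line above: 907 is the number of parameter tensors being partitioned under ZeRO-3, and the 1.42B element count is consistent with the trainer's later total of 1,283,756,736 parameters plus one extra copy of the tied embedding matrix (`tie_word_embeddings: true` in the config, so `lm_head` shares `embed_tokens`). The log does not break the count down itself; the check below is an inference:]

```python
# Hedged accounting: the ~0.14B gap between DeepSpeed's 1.42B elements and the
# trainer's 1,283,756,736 total parameters matches one 151936 x 896 embedding copy.
vocab_size, hidden_size = 151936, 896
print(1_283_756_736 + vocab_size * hidden_size)  # 1419891392 ~= 1.42e9
```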
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
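[Editor's note: the module dump is printed a second time after the checkpoint reload; the only structural difference from the first dump is `Qwen2FlashAttention2` in place of `Qwen2Attention`, i.e. FlashAttention-2 is active for this run. The 128-wide `k_proj`/`v_proj` outputs follow from grouped-query attention with the config values shown earlier (hidden size 896, 14 attention heads, 2 key/value heads):]

```python
# Grouped-query attention shapes implied by the config above
hidden_size, num_heads, num_kv_heads = 896, 14, 2
head_dim = hidden_size // num_heads   # 64
print(num_kv_heads * head_dim)        # 128 -> out_features of k_proj and v_proj
print(num_heads * head_dim)           # 896 -> out_features of q_proj and o_proj
```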
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 12:30:24,406 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 12:30:24,411 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters 
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 
114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00
+1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18]
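[Editor's note: the per-tensor counts in the trainable-parameter listing above are exactly the weight shapes of the masked projections (one score per weight element), and they reproduce the reported total of 359,661,568. The 520-step progress bar is likewise consistent with the 66,529 sampled examples at a global batch size of 128; the batch size is an inference from the step count, not something the log states. A quick check:]

```python
import math

h, kv_dim, inter = 896, 128, 4864                        # hidden, k/v width, MLP width
per_layer = 2 * h * h + 2 * h * kv_dim + 3 * h * inter   # q,o + k,v + gate,up,down
total = 24 * per_layer + 1152 * h + h * h                # 24 LLM layers + 2 connector layers
print(per_layer)               # 14909440
print(total)                   # 359661568, matches "Total Trainable Parameters"
print(math.ceil(66529 / 128))  # 520 optimizer steps, matches the progress bar
```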
7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 
[5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 03/0 : 7[7] 
-> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via 
P2P/CUMEM/read
+[... repeated NCCL INFO per-channel ring-connection messages ("Channel XX/0 : a[a] -> b[b] via P2P/CUMEM/read", ranks 0-7) elided ...]
+ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO Connected all rings
+[... repeated NCCL INFO reverse-direction per-channel connection messages ("Channel XX/0 : a[a] -> b[b] via P2P/CUMEM/read", ranks 0-7) elided ...]
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[... identical "Connected all trees", threadThresholds, and channel-count messages for ranks 1-7 elided ...]
+ywang29-vrdb-test1-worker-0:711970:719546 [3] NCCL INFO ncclCommInitRank comm 0x7f167806b3b0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xa175d2341b582298 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:711974:719550 [7] NCCL INFO ncclCommInitRank comm 0x7f55a006a7a0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xa175d2341b582298 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:711972:719549 [5] NCCL INFO ncclCommInitRank comm 0x7efaf006a4f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xa175d2341b582298 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:711973:719544 [6] NCCL INFO ncclCommInitRank comm 0x7f605806b630 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xa175d2341b582298 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:711971:719548 [4] NCCL INFO ncclCommInitRank comm 0x7fc5b006ad00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xa175d2341b582298 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:711968:719547 [1] NCCL INFO ncclCommInitRank comm 0x7f7c6c06a5c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xa175d2341b582298 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:711967:719543 [0] NCCL INFO ncclCommInitRank comm 0x7fbba406bad0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xa175d2341b582298 - Init
COMPLETE +ywang29-vrdb-test1-worker-0:711969:719545 [2] NCCL INFO ncclCommInitRank comm 0x7fded806b070 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xa175d2341b582298 - Init COMPLETE + 0%| | 1/520 [00:14<2:01:31, 14.05s/it] {'loss': 3.139, 'grad_norm': 0.16703762987270904, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:01:31, 14.05s/it] 0%| | 2/520 [00:17<1:09:11, 8.01s/it] {'loss': 2.927, 'grad_norm': 0.14590244291414436, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:17<1:09:11, 8.01s/it] 1%| | 3/520 [00:21<51:54, 6.02s/it] {'loss': 2.0259, 'grad_norm': 0.0253964052376794, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<51:54, 6.02s/it] 1%| | 4/520 [00:25<43:50, 5.10s/it] {'loss': 1.8065, 'grad_norm': 0.013462461321223095, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<43:50, 5.10s/it] 1%| | 5/520 [00:28<39:32, 4.61s/it] {'loss': 1.8983, 'grad_norm': 0.02457327425465394, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:28<39:32, 4.61s/it] 1%| | 6/520 [00:32<37:07, 4.33s/it] {'loss': 1.6681, 'grad_norm': 0.01367689739457581, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:32<37:07, 4.33s/it] 1%|▏ | 7/520 [00:36<35:38, 4.17s/it] {'loss': 1.5681, 'grad_norm': 0.012286060530754307, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<35:38, 4.17s/it] 2%|▏ | 8/520 [00:40<36:12, 4.24s/it] {'loss': 1.5823, 'grad_norm': 0.007318815071056249, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:40<36:12, 4.24s/it] 2%|▏ | 9/520 [00:45<35:56, 4.22s/it] {'loss': 1.6609, 'grad_norm': 0.008105159301755151, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<35:56, 4.22s/it] 2%|▏ | 10/520 [00:48<34:23, 4.05s/it] {'loss': 1.4571, 'grad_norm': 0.005825744632638324, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:48<34:23, 4.05s/it] 2%|▏ | 11/520 [00:52<33:35, 3.96s/it] {'loss': 1.553, 'grad_norm': 0.007150758717637741, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:52<33:35, 3.96s/it] 2%|▏ | 12/520 [00:56<32:43, 3.87s/it] {'loss': 1.4996, 'grad_norm': 0.004958728301991583, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:56<32:43, 3.87s/it][2025-10-13 12:31:29,398] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:00<34:02, 4.03s/it] {'loss': 1.4892, 'grad_norm': 0.005002561088055928, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:00<34:02, 4.03s/it] 3%|▎ | 14/520 [01:04<33:21, 3.95s/it] {'loss': 1.5546, 'grad_norm': 0.005456342308568462, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:04<33:21, 3.95s/it] 3%|▎ | 15/520 [01:08<32:29, 3.86s/it] {'loss': 1.5576, 'grad_norm': 0.005065348255582278, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:08<32:29, 3.86s/it] 3%|▎ | 16/520 [01:11<31:54, 3.80s/it] {'loss': 1.4997, 'grad_norm': 0.004709344510957822, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:11<31:54, 3.80s/it] 3%|▎ | 17/520 [01:15<31:30, 3.76s/it] {'loss': 1.5886, 'grad_norm': 0.004241805856864172, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:15<31:30, 3.76s/it] 3%|▎ | 18/520 [01:18<31:09, 3.72s/it] {'loss': 1.4361, 'grad_norm': 0.003505449469236355, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:18<31:09, 3.72s/it] 4%|▎ | 19/520 [01:22<30:57, 3.71s/it] {'loss': 1.542, 'grad_norm': 0.0038073388343102662, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:22<30:57, 3.71s/it] 4%|▍ | 20/520 [01:26<30:46, 3.69s/it] {'loss': 1.4477, 'grad_norm': 0.004265901173771942, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:26<30:46, 3.69s/it] 4%|▍ | 21/520 [01:29<30:37, 3.68s/it] {'loss': 1.5584, 'grad_norm': 0.004696558022253554, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:29<30:37, 3.68s/it] 4%|▍ | 22/520 [01:33<30:27, 3.67s/it] {'loss': 1.5866, 'grad_norm': 0.003288028336151051, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:33<30:27, 3.67s/it] 4%|▍ | 23/520 [01:37<30:20, 3.66s/it] {'loss': 1.519, 'grad_norm': 0.0032746322757949567, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:37<30:20, 3.66s/it] 5%|▍ | 24/520 [01:40<30:13, 3.66s/it] {'loss': 1.4964, 'grad_norm': 0.0035285465100573445, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:40<30:13, 3.66s/it] 5%|▍ | 25/520 [01:44<30:05, 3.65s/it] {'loss': 1.541, 'grad_norm': 0.00372341538886531, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:44<30:05, 3.65s/it] 5%|▌ | 26/520 [01:48<30:07, 3.66s/it] {'loss': 1.5237, 'grad_norm': 0.0030247163241027756, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:48<30:07, 3.66s/it] 5%|▌ | 27/520 [01:51<30:00, 3.65s/it] {'loss': 1.4395, 'grad_norm': 0.00368756866386643, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:51<30:00, 3.65s/it] 5%|▌ | 28/520 [01:55<29:54, 3.65s/it] {'loss': 1.4212, 'grad_norm': 0.0032854550809871793, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:55<29:54, 3.65s/it] 6%|▌ | 29/520 [01:59<29:50, 3.65s/it] {'loss': 1.4373, 'grad_norm': 0.0031809121820824886, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [01:59<29:50, 3.65s/it] 6%|▌ | 30/520 [02:02<29:51, 3.66s/it] {'loss': 1.6012, 'grad_norm': 0.003803511916954678, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:02<29:51, 3.66s/it] 6%|▌ | 31/520 [02:06<29:47, 3.66s/it] {'loss': 1.4255, 'grad_norm': 0.0027077376450656407, 'learning_rate': 
0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:06<29:47, 3.66s/it] 6%|▌ | 32/520 [02:10<29:47, 3.66s/it] {'loss': 1.5686, 'grad_norm': 0.006489892865136119, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:10<29:47, 3.66s/it] 6%|▋ | 33/520 [02:13<29:40, 3.66s/it] {'loss': 1.4452, 'grad_norm': 0.003546714451072051, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:13<29:40, 3.66s/it] 7%|▋ | 34/520 [02:17<29:42, 3.67s/it] {'loss': 1.4234, 'grad_norm': 0.0037505159687674207, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:17<29:42, 3.67s/it] 7%|▋ | 35/520 [02:21<29:33, 3.66s/it] {'loss': 1.4573, 'grad_norm': 0.004053532019506037, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:21<29:33, 3.66s/it] 7%|▋ | 36/520 [02:24<29:34, 3.67s/it] {'loss': 1.5545, 'grad_norm': 0.002984555253462861, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:24<29:34, 3.67s/it] 7%|▋ | 37/520 [02:28<29:46, 3.70s/it] {'loss': 1.6142, 'grad_norm': 0.004808839029389827, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:28<29:46, 3.70s/it] 7%|▋ | 38/520 [02:32<29:45, 3.70s/it] {'loss': 1.6313, 'grad_norm': 0.003276076825468106, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:32<29:45, 3.70s/it] 8%|▊ | 39/520 [02:35<29:32, 3.69s/it] {'loss': 1.4574, 'grad_norm': 0.003366909988805981, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:35<29:32, 3.69s/it] 8%|▊ | 40/520 [02:39<29:18, 3.66s/it] {'loss': 1.4962, 'grad_norm': 0.002792109562182542, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:39<29:18, 3.66s/it] 8%|▊ | 41/520 [02:43<29:12, 3.66s/it] {'loss': 1.4606, 'grad_norm': 0.0029517871945591422, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:43<29:12, 3.66s/it] 8%|▊ | 42/520 [02:46<29:10, 3.66s/it] {'loss': 1.4998, 'grad_norm': 0.0037555240018739655, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:46<29:10, 3.66s/it] 8%|▊ | 43/520 [02:50<29:06, 3.66s/it] {'loss': 1.4711, 'grad_norm': 0.003918305909324568, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:50<29:06, 3.66s/it] 8%|▊ | 44/520 [02:54<29:01, 3.66s/it] {'loss': 1.5904, 'grad_norm': 0.004950754272000498, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:54<29:01, 3.66s/it] 9%|▊ | 45/520 [02:57<29:00, 3.66s/it] {'loss': 1.5057, 'grad_norm': 0.0036426732546122388, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:57<29:00, 3.66s/it] 9%|▉ | 46/520 [03:01<28:55, 3.66s/it] {'loss': 1.6946, 'grad_norm': 0.004498206993273597, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:01<28:55, 3.66s/it] 9%|▉ | 47/520 [03:05<28:53, 3.66s/it] {'loss': 1.4911, 'grad_norm': 0.0036459115801472557, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<28:53, 3.66s/it] 9%|▉ | 48/520 [03:08<28:54, 3.68s/it] {'loss': 1.4501, 'grad_norm': 0.0032182560341856696, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:08<28:54, 3.68s/it] 9%|▉ | 49/520 [03:12<28:57, 3.69s/it] {'loss': 1.4956, 'grad_norm': 0.0027759951545267606, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:12<28:57, 3.69s/it] 10%|▉ | 50/520 [03:16<28:58, 3.70s/it] {'loss': 1.4904, 'grad_norm': 0.0028352235517949223, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:16<28:58, 
3.70s/it] 10%|▉ | 51/520 [03:19<28:50, 3.69s/it] {'loss': 1.4122, 'grad_norm': 0.0031400670897652407, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:19<28:50, 3.69s/it] 10%|█ | 52/520 [03:23<28:42, 3.68s/it] {'loss': 1.5472, 'grad_norm': 0.0037717942226233934, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:23<28:42, 3.68s/it] 10%|█ | 53/520 [03:27<29:04, 3.73s/it] {'loss': 1.5432, 'grad_norm': 0.003057747208830379, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:27<29:04, 3.73s/it] 10%|█ | 54/520 [03:31<29:17, 3.77s/it] {'loss': 1.4354, 'grad_norm': 0.0033587812760484093, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<29:17, 3.77s/it] 11%|█ | 55/520 [03:35<29:23, 3.79s/it] {'loss': 1.4181, 'grad_norm': 0.003193290566735659, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:35<29:23, 3.79s/it] 11%|█ | 56/520 [03:39<29:26, 3.81s/it] {'loss': 1.557, 'grad_norm': 0.003041167799297441, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:39<29:26, 3.81s/it] 11%|█ | 57/520 [03:42<29:29, 3.82s/it] {'loss': 1.4134, 'grad_norm': 0.004484438484917933, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<29:29, 3.82s/it] 11%|█ | 58/520 [03:46<29:30, 3.83s/it] {'loss': 1.5682, 'grad_norm': 0.002697729852215578, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:46<29:30, 3.83s/it] 11%|█▏ | 59/520 [03:50<29:33, 3.85s/it] {'loss': 1.6981, 'grad_norm': 0.016582603789461886, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:50<29:33, 3.85s/it] 12%|█▏ | 60/520 [03:54<29:31, 3.85s/it] {'loss': 1.5039, 'grad_norm': 0.00566375118994027, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:54<29:31, 3.85s/it] 12%|█▏ | 61/520 [03:58<29:32, 3.86s/it] {'loss': 1.6311, 'grad_norm': 0.004050540480492602, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:58<29:32, 3.86s/it] 12%|█▏ | 62/520 [04:02<29:27, 3.86s/it] {'loss': 1.4712, 'grad_norm': 0.003829404483746922, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:02<29:27, 3.86s/it] 12%|█▏ | 63/520 [04:06<29:27, 3.87s/it] {'loss': 1.4535, 'grad_norm': 0.004273257064525041, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:06<29:27, 3.87s/it] 12%|█▏ | 64/520 [04:09<28:58, 3.81s/it] {'loss': 1.499, 'grad_norm': 0.0027567277422235288, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:09<28:58, 3.81s/it] 12%|█▎ | 65/520 [04:13<28:36, 3.77s/it] {'loss': 1.4974, 'grad_norm': 0.003497391306534512, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:13<28:36, 3.77s/it] 13%|█▎ | 66/520 [04:17<28:25, 3.76s/it] {'loss': 1.4546, 'grad_norm': 0.0044892733432863275, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:17<28:25, 3.76s/it] 13%|█▎ | 67/520 [04:20<28:07, 3.73s/it] {'loss': 1.3331, 'grad_norm': 0.0025454463661001565, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:20<28:07, 3.73s/it] 13%|█▎ | 68/520 [04:24<27:54, 3.70s/it] {'loss': 1.388, 'grad_norm': 0.002860325918777254, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:24<27:54, 3.70s/it] 13%|█▎ | 69/520 [04:28<27:45, 3.69s/it] {'loss': 1.3752, 'grad_norm': 0.004290375140700329, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:28<27:45, 3.69s/it] 13%|█▎ | 70/520 
[04:31<27:35, 3.68s/it] {'loss': 1.4261, 'grad_norm': 0.002896884430549318, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:31<27:35, 3.68s/it] 14%|█▎ | 71/520 [04:35<27:37, 3.69s/it] {'loss': 1.347, 'grad_norm': 0.0029755563591322286, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:35<27:37, 3.69s/it] 14%|█▍ | 72/520 [04:39<27:36, 3.70s/it] {'loss': 1.4992, 'grad_norm': 0.003407986189849761, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:39<27:36, 3.70s/it] 14%|█▍ | 73/520 [04:42<27:30, 3.69s/it] {'loss': 1.3184, 'grad_norm': 0.0026699777744540513, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:42<27:30, 3.69s/it] 14%|█▍ | 74/520 [04:46<27:27, 3.69s/it] {'loss': 1.4345, 'grad_norm': 0.0027491909354495497, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:46<27:27, 3.69s/it] 14%|█▍ | 75/520 [04:50<27:17, 3.68s/it] {'loss': 1.344, 'grad_norm': 0.0028150524817182753, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:50<27:17, 3.68s/it] 15%|█▍ | 76/520 [04:53<27:09, 3.67s/it] {'loss': 1.656, 'grad_norm': 0.0032501197585515216, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:53<27:09, 3.67s/it] 15%|█▍ | 77/520 [04:57<27:04, 3.67s/it] {'loss': 1.2742, 'grad_norm': 0.0033723478414807687, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:57<27:04, 3.67s/it] 15%|█▌ | 78/520 [05:01<27:02, 3.67s/it] {'loss': 1.3912, 'grad_norm': 0.0027889015518825526, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:01<27:02, 3.67s/it] 15%|█▌ | 79/520 [05:04<26:56, 3.67s/it] {'loss': 1.3789, 'grad_norm': 0.00236762989092219, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:04<26:56, 3.67s/it] 15%|█▌ | 80/520 [05:08<26:55, 3.67s/it] {'loss': 1.6635, 'grad_norm': 0.0040054217113591585, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:08<26:55, 3.67s/it] 16%|█▌ | 81/520 [05:12<26:49, 3.67s/it] {'loss': 1.5308, 'grad_norm': 0.003389035599423091, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:12<26:49, 3.67s/it] 16%|█▌ | 82/520 [05:15<26:46, 3.67s/it] {'loss': 1.4617, 'grad_norm': 0.0025611605140559604, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:15<26:46, 3.67s/it] 16%|█▌ | 83/520 [05:19<27:03, 3.72s/it] {'loss': 1.4897, 'grad_norm': 0.003032745829902103, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:19<27:03, 3.72s/it] 16%|█▌ | 84/520 [05:23<27:10, 3.74s/it] {'loss': 1.4876, 'grad_norm': 0.003195886871202237, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:23<27:10, 3.74s/it] 16%|█▋ | 85/520 [05:27<27:16, 3.76s/it] {'loss': 1.5031, 'grad_norm': 0.0028339519810611172, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:27<27:16, 3.76s/it] 17%|█▋ | 86/520 [05:31<27:15, 3.77s/it] {'loss': 1.5186, 'grad_norm': 0.002896693165522569, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:31<27:15, 3.77s/it] 17%|█▋ | 87/520 [05:34<26:54, 3.73s/it] {'loss': 1.6077, 'grad_norm': 0.004333322703347984, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:34<26:54, 3.73s/it] 17%|█▋ | 88/520 [05:38<26:46, 3.72s/it] {'loss': 1.6426, 'grad_norm': 0.004286554197896949, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:38<26:46, 3.72s/it] 17%|█▋ | 
89/520 [05:42<26:30, 3.69s/it] {'loss': 1.4668, 'grad_norm': 0.0029080029888188894, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:42<26:30, 3.69s/it] 17%|█▋ | 90/520 [05:45<26:22, 3.68s/it] {'loss': 1.3973, 'grad_norm': 0.0027066927371020735, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:45<26:22, 3.68s/it] 18%|█▊ | 91/520 [05:49<26:12, 3.67s/it] {'loss': 1.4639, 'grad_norm': 0.002466066746604248, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:49<26:12, 3.67s/it] 18%|█▊ | 92/520 [05:53<26:07, 3.66s/it] {'loss': 1.4081, 'grad_norm': 0.002679018842493166, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:53<26:07, 3.66s/it] 18%|█▊ | 93/520 [05:56<26:04, 3.66s/it] {'loss': 1.4257, 'grad_norm': 0.002811153445902329, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:56<26:04, 3.66s/it] 18%|█▊ | 94/520 [06:00<25:55, 3.65s/it] {'loss': 1.5178, 'grad_norm': 0.00304456762733411, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:00<25:55, 3.65s/it] 18%|█▊ | 95/520 [06:04<25:55, 3.66s/it] {'loss': 1.4007, 'grad_norm': 0.0033641981143937177, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:04<25:55, 3.66s/it] 18%|█▊ | 96/520 [06:07<25:52, 3.66s/it] {'loss': 1.4147, 'grad_norm': 0.0023754492693989497, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:07<25:52, 3.66s/it] 19%|█▊ | 97/520 [06:11<25:45, 3.65s/it] {'loss': 1.3748, 'grad_norm': 0.002842501331340849, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:11<25:45, 3.65s/it] 19%|█▉ | 98/520 [06:14<25:40, 3.65s/it] {'loss': 1.3764, 'grad_norm': 0.002317082754932632, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:14<25:40, 3.65s/it] 19%|█▉ | 99/520 [06:18<25:43, 3.67s/it] {'loss': 1.3993, 'grad_norm': 0.0028468752588015935, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:18<25:43, 3.67s/it] 19%|█▉ | 100/520 [06:22<25:37, 3.66s/it] {'loss': 1.4981, 'grad_norm': 0.0029304389856192317, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:22<25:37, 3.66s/it] 19%|█▉ | 101/520 [06:25<25:33, 3.66s/it] {'loss': 1.3934, 'grad_norm': 0.0028361305408132523, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:25<25:33, 3.66s/it] 20%|█▉ | 102/520 [06:29<25:26, 3.65s/it] {'loss': 1.3941, 'grad_norm': 0.002671519014606556, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:29<25:26, 3.65s/it] 20%|█▉ | 103/520 [06:33<25:28, 3.67s/it] {'loss': 1.3252, 'grad_norm': 0.0023456106590967396, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:33<25:28, 3.67s/it] 20%|██ | 104/520 [06:36<25:26, 3.67s/it] {'loss': 1.4002, 'grad_norm': 0.002666485489898082, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:36<25:26, 3.67s/it] 20%|██ | 105/520 [06:40<25:19, 3.66s/it] {'loss': 1.3986, 'grad_norm': 0.0022815774715765234, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:40<25:19, 3.66s/it] 20%|██ | 106/520 [06:44<25:12, 3.65s/it] {'loss': 1.4945, 'grad_norm': 0.0026090590005861626, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:44<25:12, 3.65s/it] 21%|██ | 107/520 [06:47<25:14, 3.67s/it] {'loss': 1.4732, 'grad_norm': 0.0028982705381457546, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:47<25:14, 
3.67s/it] 21%|██ | 108/520 [06:51<25:10, 3.67s/it] {'loss': 1.3456, 'grad_norm': 0.002631817109412352, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:51<25:10, 3.67s/it] 21%|██ | 109/520 [06:55<25:07, 3.67s/it] {'loss': 1.4587, 'grad_norm': 0.0025584322360177403, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:55<25:07, 3.67s/it] 21%|██ | 110/520 [06:58<25:03, 3.67s/it] {'loss': 1.5471, 'grad_norm': 0.00248996520275113, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [06:58<25:03, 3.67s/it] 21%|██▏ | 111/520 [07:02<25:00, 3.67s/it] {'loss': 1.5584, 'grad_norm': 0.002621329655474013, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:02<25:00, 3.67s/it] 22%|██▏ | 112/520 [07:06<24:55, 3.67s/it] {'loss': 1.4328, 'grad_norm': 0.0024398299805448707, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:06<24:55, 3.67s/it] 22%|██▏ | 113/520 [07:09<24:53, 3.67s/it] {'loss': 1.2963, 'grad_norm': 0.0021375220045330673, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:09<24:53, 3.67s/it] 22%|██▏ | 114/520 [07:13<24:51, 3.67s/it] {'loss': 1.3984, 'grad_norm': 0.002258238267327826, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:13<24:51, 3.67s/it] 22%|██▏ | 115/520 [07:17<24:47, 3.67s/it] {'loss': 1.5262, 'grad_norm': 0.0022835833608539206, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:17<24:47, 3.67s/it] 22%|██▏ | 116/520 [07:21<24:44, 3.67s/it] {'loss': 1.5177, 'grad_norm': 0.0022052631655260823, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:21<24:44, 3.67s/it] 22%|██▎ | 117/520 [07:24<24:38, 3.67s/it] {'loss': 1.4942, 'grad_norm': 0.0025803586952524615, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:24<24:38, 3.67s/it] 23%|██▎ | 118/520 [07:28<24:31, 3.66s/it] {'loss': 1.3757, 'grad_norm': 0.002183255237797966, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:28<24:31, 3.66s/it] 23%|██▎ | 119/520 [07:31<24:28, 3.66s/it] {'loss': 1.3338, 'grad_norm': 0.002225689844197637, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:31<24:28, 3.66s/it] 23%|██▎ | 120/520 [07:35<24:25, 3.66s/it] {'loss': 1.3605, 'grad_norm': 0.0029236615438642116, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:35<24:25, 3.66s/it] 23%|██▎ | 121/520 [07:39<24:17, 3.65s/it] {'loss': 1.4256, 'grad_norm': 0.0025168368867386206, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:39<24:17, 3.65s/it] 23%|██▎ | 122/520 [07:42<24:16, 3.66s/it] {'loss': 1.3034, 'grad_norm': 0.002213659014326122, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:42<24:16, 3.66s/it] 24%|██▎ | 123/520 [07:46<24:13, 3.66s/it] {'loss': 1.5182, 'grad_norm': 0.002812062228012048, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:46<24:13, 3.66s/it] 24%|██▍ | 124/520 [07:50<24:07, 3.65s/it] {'loss': 1.399, 'grad_norm': 0.0025690063794693817, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:50<24:07, 3.65s/it] 24%|██▍ | 125/520 [07:53<24:10, 3.67s/it] {'loss': 1.3781, 'grad_norm': 0.0023055428485636844, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:53<24:10, 3.67s/it] 24%|██▍ | 126/520 [07:58<25:26, 3.87s/it] {'loss': 1.437, 'grad_norm': 0.002162083908759765, 'learning_rate': 
0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:58<25:26, 3.87s/it] 24%|██▍ | 127/520 [08:01<24:56, 3.81s/it] {'loss': 1.3532, 'grad_norm': 0.0028193217082185104, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:01<24:56, 3.81s/it] 25%|██▍ | 128/520 [08:05<24:36, 3.77s/it] {'loss': 1.4147, 'grad_norm': 0.0023807664591793476, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:05<24:36, 3.77s/it] 25%|██▍ | 129/520 [08:09<24:19, 3.73s/it] {'loss': 1.3357, 'grad_norm': 0.0019925262416812636, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:09<24:19, 3.73s/it] 25%|██▌ | 130/520 [08:12<24:07, 3.71s/it] {'loss': 1.3931, 'grad_norm': 0.002191657949306151, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:12<24:07, 3.71s/it] 25%|██▌ | 131/520 [08:16<23:58, 3.70s/it] {'loss': 1.3916, 'grad_norm': 0.002351630963207994, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:16<23:58, 3.70s/it] 25%|██▌ | 132/520 [08:20<23:50, 3.69s/it] {'loss': 1.4354, 'grad_norm': 0.00230220059576615, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:20<23:50, 3.69s/it] 26%|██▌ | 133/520 [08:23<23:43, 3.68s/it] {'loss': 1.3428, 'grad_norm': 0.0023572439938803467, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:23<23:43, 3.68s/it] 26%|██▌ | 134/520 [08:27<23:40, 3.68s/it] {'loss': 1.4328, 'grad_norm': 0.002482263853809459, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:27<23:40, 3.68s/it] 26%|██▌ | 135/520 [08:31<23:34, 3.67s/it] {'loss': 1.5011, 'grad_norm': 0.0023377051652798317, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:31<23:34, 3.67s/it] 26%|██▌ | 136/520 [08:34<23:27, 3.67s/it] {'loss': 1.423, 'grad_norm': 0.002524045049832575, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:34<23:27, 3.67s/it] 26%|██▋ | 137/520 [08:38<23:27, 3.67s/it] {'loss': 1.3455, 'grad_norm': 0.0026515156645917186, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:38<23:27, 3.67s/it] 27%|██▋ | 138/520 [08:42<23:43, 3.73s/it] {'loss': 1.3499, 'grad_norm': 0.0022949500658148566, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:42<23:43, 3.73s/it] 27%|██▋ | 139/520 [08:46<23:34, 3.71s/it] {'loss': 1.2945, 'grad_norm': 0.0024442161026109535, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:46<23:34, 3.71s/it] 27%|██▋ | 140/520 [08:49<23:24, 3.70s/it] {'loss': 1.4375, 'grad_norm': 0.002318266130751675, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:49<23:24, 3.70s/it] 27%|██▋ | 141/520 [08:53<23:20, 3.70s/it] {'loss': 1.4644, 'grad_norm': 0.0024739135323664347, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:53<23:20, 3.70s/it] 27%|██▋ | 142/520 [08:57<23:12, 3.68s/it] {'loss': 1.4956, 'grad_norm': 0.0022517993632851507, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:57<23:12, 3.68s/it] 28%|██▊ | 143/520 [09:00<23:07, 3.68s/it] {'loss': 1.3785, 'grad_norm': 0.002542307247788855, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:00<23:07, 3.68s/it] 28%|██▊ | 144/520 [09:04<23:03, 3.68s/it] {'loss': 1.3369, 'grad_norm': 0.002263479754890158, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:04<23:03, 3.68s/it] 28%|██▊ | 145/520 [09:08<23:04, 
3.69s/it] {'loss': 1.2663, 'grad_norm': 0.0019728665934100834, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:08<23:04, 3.69s/it] 28%|██▊ | 146/520 [09:11<22:56, 3.68s/it] {'loss': 1.5216, 'grad_norm': 0.0022875908096742377, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:11<22:56, 3.68s/it] 28%|██▊ | 147/520 [09:15<22:53, 3.68s/it] {'loss': 1.3129, 'grad_norm': 0.0023579761346693012, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:15<22:53, 3.68s/it] 28%|██▊ | 148/520 [09:19<22:46, 3.67s/it] {'loss': 1.3439, 'grad_norm': 0.002141204157578225, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:19<22:46, 3.67s/it] 29%|██▊ | 149/520 [09:22<22:44, 3.68s/it] {'loss': 1.2929, 'grad_norm': 0.0022561956740461053, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:22<22:44, 3.68s/it] 29%|██▉ | 150/520 [09:26<22:39, 3.67s/it] {'loss': 1.528, 'grad_norm': 0.0027894249566153603, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:26<22:39, 3.67s/it] 29%|██▉ | 151/520 [09:30<22:33, 3.67s/it] {'loss': 1.3351, 'grad_norm': 0.0021562314064814832, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:30<22:33, 3.67s/it] 29%|██▉ | 152/520 [09:33<22:31, 3.67s/it] {'loss': 1.3027, 'grad_norm': 0.002322672917377266, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:33<22:31, 3.67s/it] 29%|██▉ | 153/520 [09:37<22:26, 3.67s/it] {'loss': 1.3351, 'grad_norm': 0.0025240129918244684, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:37<22:26, 3.67s/it] 30%|██▉ | 154/520 [09:41<22:22, 3.67s/it] {'loss': 1.4291, 'grad_norm': 0.0021416165459644805, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:41<22:22, 3.67s/it] 30%|██▉ | 155/520 [09:44<22:18, 3.67s/it] {'loss': 1.3382, 'grad_norm': 0.002255779095441287, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:44<22:18, 3.67s/it] 30%|███ | 156/520 [09:48<22:13, 3.66s/it] {'loss': 1.3674, 'grad_norm': 0.002428891871032354, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:48<22:13, 3.66s/it] 30%|███ | 157/520 [09:52<22:05, 3.65s/it] {'loss': 1.5153, 'grad_norm': 0.002577102345011543, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:52<22:05, 3.65s/it] 30%|███ | 158/520 [09:55<22:00, 3.65s/it] {'loss': 1.3454, 'grad_norm': 0.0025657513733098867, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:55<22:00, 3.65s/it] 31%|███ | 159/520 [09:59<21:58, 3.65s/it] {'loss': 1.3701, 'grad_norm': 0.0022107967065279673, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [09:59<21:58, 3.65s/it] 31%|███ | 160/520 [10:03<21:54, 3.65s/it] {'loss': 1.3997, 'grad_norm': 0.0022288750208678725, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:03<21:54, 3.65s/it] 31%|███ | 161/520 [10:06<21:49, 3.65s/it] {'loss': 1.3852, 'grad_norm': 0.002252300313808351, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:06<21:49, 3.65s/it] 31%|███ | 162/520 [10:10<21:45, 3.65s/it] {'loss': 1.4407, 'grad_norm': 0.002397358896682201, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:10<21:45, 3.65s/it] 31%|███▏ | 163/520 [10:14<21:47, 3.66s/it] {'loss': 1.2538, 'grad_norm': 0.00280431604140018, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} 
+ 164/520 [10:17<21:44, 3.66s/it] {'loss': 1.2257, 'grad_norm': 0.0020988563454056386, 'learning_rate': 0.16038044103254775, 'epoch': 0.32}
+ 165/520 [10:21<21:35, 3.65s/it] {'loss': 1.3684, 'grad_norm': 0.002150573957716539, 'learning_rate': 0.15988239313430005, 'epoch': 0.32}
+ 166/520 [10:25<21:28, 3.64s/it] {'loss': 1.3639, 'grad_norm': 0.0024880850406375846, 'learning_rate': 0.15938201855735015, 'epoch': 0.32}
+ 167/520 [10:28<21:28, 3.65s/it] {'loss': 1.3549, 'grad_norm': 0.0025551638743221156, 'learning_rate': 0.15887933674332047, 'epoch': 0.32}
+ 168/520 [10:32<21:25, 3.65s/it] {'loss': 1.2807, 'grad_norm': 0.002202038833166248, 'learning_rate': 0.158374367223479, 'epoch': 0.32}
+ 169/520 [10:35<21:21, 3.65s/it] {'loss': 1.3704, 'grad_norm': 0.0021290945449759942, 'learning_rate': 0.1578671296179806, 'epoch': 0.33}
+ 170/520 [10:39<21:20, 3.66s/it] {'loss': 1.3721, 'grad_norm': 0.0024695579967416986, 'learning_rate': 0.15735764363510463, 'epoch': 0.33}
+ 171/520 [10:43<21:23, 3.68s/it] {'loss': 1.2973, 'grad_norm': 0.002504295313927535, 'learning_rate': 0.15684592907048925, 'epoch': 0.33}
+ 172/520 [10:47<21:21, 3.68s/it] {'loss': 1.3637, 'grad_norm': 0.002049196910828029, 'learning_rate': 0.1563320058063622, 'epoch': 0.33}
+ 173/520 [10:50<21:13, 3.67s/it] {'loss': 1.3055, 'grad_norm': 0.0023031425126453105, 'learning_rate': 0.15581589381076844, 'epoch': 0.33}
+ 174/520 [10:54<21:10, 3.67s/it] {'loss': 1.3729, 'grad_norm': 0.002580331210617441, 'learning_rate': 0.15529761313679394, 'epoch': 0.33}
+ 175/520 [10:58<21:17, 3.70s/it] {'loss': 1.2632, 'grad_norm': 0.0021286562546430424, 'learning_rate': 0.15477718392178716, 'epoch': 0.34}
+ 176/520 [11:02<21:27, 3.74s/it] {'loss': 1.4554, 'grad_norm': 0.002255733448532935, 'learning_rate': 0.15425462638657594, 'epoch': 0.34}
+ 177/520 [11:05<21:33, 3.77s/it] {'loss': 1.3194, 'grad_norm': 0.002533832018730257, 'learning_rate': 0.1537299608346824, 'epoch': 0.34}
+ 178/520 [11:09<21:19, 3.74s/it] {'loss': 1.344, 'grad_norm': 0.0024340479512965753, 'learning_rate': 0.15320320765153367, 'epoch': 0.34}
+ 179/520 [11:13<21:09, 3.72s/it] {'loss': 1.4316, 'grad_norm': 0.002094956938727727, 'learning_rate': 0.15267438730367008, 'epoch': 0.34}
+ 180/520 [11:16<21:02, 3.71s/it] {'loss': 1.3414, 'grad_norm': 0.002302309014467398, 'learning_rate': 0.1521435203379498, 'epoch': 0.35}
+ 181/520 [11:20<20:55, 3.70s/it] {'loss': 1.3185, 'grad_norm': 0.002369844489572805, 'learning_rate': 0.15161062738075068, 'epoch': 0.35}
+ 182/520 [11:24<20:51, 3.70s/it] {'loss': 1.3239, 'grad_norm': 0.0021394949385384974, 'learning_rate': 0.1510757291371686, 'epoch': 0.35}
+ 183/520 [11:27<20:44, 3.69s/it] {'loss': 1.3613, 'grad_norm': 0.002435521111788183, 'learning_rate': 0.1505388463902131, 'epoch': 0.35}
+ 184/520 [11:31<20:39, 3.69s/it] {'loss': 1.2664, 'grad_norm': 0.00216634926016715, 'learning_rate': 0.15000000000000002, 'epoch': 0.35}
+ 185/520 [11:35<20:31, 3.68s/it] {'loss': 1.4444, 'grad_norm': 0.0021665034567990753, 'learning_rate': 0.14945921090294076, 'epoch': 0.36}
+ 186/520 [11:38<20:22, 3.66s/it] {'loss': 1.2971, 'grad_norm': 0.0020851315367194207, 'learning_rate': 0.14891650011092894, 'epoch': 0.36}
+ 187/520 [11:42<20:17, 3.66s/it] {'loss': 1.309, 'grad_norm': 0.0023402608467367855, 'learning_rate': 0.14837188871052398, 'epoch': 0.36}
+ 188/520 [11:46<20:08, 3.64s/it] {'loss': 1.3847, 'grad_norm': 0.00226384602092878, 'learning_rate': 0.14782539786213184, 'epoch': 0.36}
+ 189/520 [11:49<20:04, 3.64s/it] {'loss': 1.3921, 'grad_norm': 0.0020162714613302817, 'learning_rate': 0.1472770487991827, 'epoch': 0.36}
+ 190/520 [11:53<19:58, 3.63s/it] {'loss': 1.3114, 'grad_norm': 0.0023972590513345383, 'learning_rate': 0.1467268628273062, 'epoch': 0.37}
+ 191/520 [11:57<19:56, 3.64s/it] {'loss': 1.2642, 'grad_norm': 0.0019991864213763744, 'learning_rate': 0.1461748613235034, 'epoch': 0.37}
+ 192/520 [12:00<19:55, 3.64s/it] {'loss': 1.3555, 'grad_norm': 0.002043222714222568, 'learning_rate': 0.1456210657353163, 'epoch': 0.37}
+ 193/520 [12:04<19:51, 3.64s/it] {'loss': 1.3929, 'grad_norm': 0.0025407099578907003, 'learning_rate': 0.14506549757999454, 'epoch': 0.37}
+ 194/520 [12:07<19:49, 3.65s/it] {'loss': 1.2645, 'grad_norm': 0.002129012874673532, 'learning_rate': 0.14450817844365924, 'epoch': 0.37}
+ 195/520 [12:11<19:45, 3.65s/it] {'loss': 1.3736, 'grad_norm': 0.002166993141678551, 'learning_rate': 0.1439491299804645, 'epoch': 0.38}
+ 196/520 [12:15<19:43, 3.65s/it] {'loss': 1.3341, 'grad_norm': 0.0023219332705836054, 'learning_rate': 0.14338837391175582, 'epoch': 0.38}
+ 197/520 [12:18<19:41, 3.66s/it] {'loss': 1.2983, 'grad_norm': 0.0020570076425358488, 'learning_rate': 0.14282593202522628, 'epoch': 0.38}
+ 198/520 [12:22<19:34, 3.65s/it] {'loss': 1.3648, 'grad_norm': 0.0022122164478892304, 'learning_rate': 0.14226182617406996, 'epoch': 0.38}
+ 199/520 [12:26<19:32, 3.65s/it] {'loss': 1.2789, 'grad_norm': 0.0022989051848803003, 'learning_rate': 0.14169607827613281, 'epoch': 0.38}
+ 200/520 [12:29<19:27, 3.65s/it] {'loss': 1.3307, 'grad_norm': 0.002305551200055762, 'learning_rate': 0.14112871031306118, 'epoch': 0.38}
+ 201/520 [12:33<19:24, 3.65s/it] {'loss': 1.3299, 'grad_norm': 0.0019385123484822048, 'learning_rate': 0.1405597443294475, 'epoch': 0.39}
+ 202/520 [12:37<19:22, 3.65s/it] {'loss': 1.2784, 'grad_norm': 0.0020848221453458717, 'learning_rate': 0.13998920243197407, 'epoch': 0.39}
+ 203/520 [12:40<19:16, 3.65s/it] {'loss': 1.3362, 'grad_norm': 0.002225726881938612, 'learning_rate': 0.13941710678855396, 'epoch': 0.39}
+ 204/520 [12:44<19:11, 3.64s/it] {'loss': 1.3635, 'grad_norm': 0.002359818829274621, 'learning_rate': 0.13884347962746948, 'epoch': 0.39}
+ 205/520 [12:48<19:11, 3.65s/it] {'loss': 1.3421, 'grad_norm': 0.0021771022696081316, 'learning_rate': 0.138268343236509, 'epoch': 0.39}
+ 206/520 [12:51<19:04, 3.64s/it] {'loss': 1.4018, 'grad_norm': 0.002078336518762771, 'learning_rate': 0.13769171996210053, 'epoch': 0.4}
+ 207/520 [12:55<18:59, 3.64s/it] {'loss': 1.3185, 'grad_norm': 0.00203256756782431, 'learning_rate': 0.1371136322084438, 'epoch': 0.4}
+ 208/520 [12:59<18:57, 3.65s/it] {'loss': 1.37, 'grad_norm': 0.0024051133306057294, 'learning_rate': 0.13653410243663952, 'epoch': 0.4}
+ 209/520 [13:02<18:58, 3.66s/it] {'loss': 1.2861, 'grad_norm': 0.0019539801485241675, 'learning_rate': 0.13595315316381676, 'epoch': 0.4}
+ 210/520 [13:06<18:59, 3.67s/it] {'loss': 1.3753, 'grad_norm': 0.002238684759763301, 'learning_rate': 0.13537080696225814, 'epoch': 0.4}
+ 211/520 [13:10<18:56, 3.68s/it] {'loss': 1.3774, 'grad_norm': 0.0020576795177891256, 'learning_rate': 0.13478708645852272, 'epoch': 0.41}
+ 212/520 [13:13<18:52, 3.68s/it] {'loss': 1.3433, 'grad_norm': 0.0019430588659045144, 'learning_rate': 0.1342020143325669, 'epoch': 0.41}
+ 213/520 [13:17<18:50, 3.68s/it] {'loss': 1.3111, 'grad_norm': 0.0023613392265373627, 'learning_rate': 0.13361561331686309, 'epoch': 0.41}
+ 214/520 [13:21<19:01, 3.73s/it] {'loss': 1.2977, 'grad_norm': 0.0022043274330943445, 'learning_rate': 0.13302790619551672, 'epoch': 0.41}
+ 215/520 [13:25<19:13, 3.78s/it] {'loss': 1.2623, 'grad_norm': 0.002114982648062172, 'learning_rate': 0.1324389158033807, 'epoch': 0.41}
+ 216/520 [13:29<19:17, 3.81s/it] {'loss': 1.2147, 'grad_norm': 0.001987640842241842, 'learning_rate': 0.13184866502516845, 'epoch': 0.42}
+ 217/520 [13:33<19:18, 3.82s/it] {'loss': 1.3367, 'grad_norm': 0.002144751543453942, 'learning_rate': 0.13125717679456447, 'epoch': 0.42}
+ 218/520 [13:36<19:14, 3.82s/it] {'loss': 1.3299, 'grad_norm': 0.0021122005196566275, 'learning_rate': 0.13066447409333345, 'epoch': 0.42}
+ 219/520 [13:40<19:13, 3.83s/it] {'loss': 1.3036, 'grad_norm': 0.0018530456041032707, 'learning_rate': 0.1300705799504273, 'epoch': 0.42}
+ 220/520 [13:44<19:09, 3.83s/it] {'loss': 1.3085, 'grad_norm': 0.002088069023439986, 'learning_rate': 0.12947551744109043, 'epoch': 0.42}
+ 221/520 [13:48<19:10, 3.85s/it] {'loss': 1.3324, 'grad_norm': 0.0020152256592065737, 'learning_rate': 0.128879309685963, 'epoch': 0.42}
+ 222/520 [13:52<19:04, 3.84s/it] {'loss': 1.2484, 'grad_norm': 0.002137238762182067, 'learning_rate': 0.12828197985018275, 'epoch': 0.43}
+ 223/520 [13:56<19:00, 3.84s/it] {'loss': 1.2421, 'grad_norm': 0.0018993520196122027, 'learning_rate': 0.12768355114248495, 'epoch': 0.43}
+ 224/520 [13:59<18:57, 3.84s/it] {'loss': 1.4472, 'grad_norm': 0.0028395619231312234, 'learning_rate': 0.12708404681430052, 'epoch': 0.43}
+ 225/520 [14:03<18:53, 3.84s/it] {'loss': 1.263, 'grad_norm': 0.0020966411549890943, 'learning_rate': 0.1264834901588527, 'epoch': 0.43}
+ 226/520 [14:07<18:53, 3.86s/it] {'loss': 1.3646, 'grad_norm': 0.0020070509994457135, 'learning_rate': 0.12588190451025208, 'epoch': 0.43}
+ 227/520 [14:11<18:46, 3.84s/it] {'loss': 1.3506, 'grad_norm': 0.001955070804978748, 'learning_rate': 0.12527931324258976, 'epoch': 0.44}
+ 228/520 [14:15<18:46, 3.86s/it] {'loss': 1.455, 'grad_norm': 0.003511886077178459, 'learning_rate': 0.12467573976902935, 'epoch': 0.44}
+ 229/520 [14:19<18:28, 3.81s/it] {'loss': 1.3206, 'grad_norm': 0.001805855636404698, 'learning_rate': 0.12407120754089732, 'epoch': 0.44}
+ 230/520 [14:22<18:21, 3.80s/it] {'loss': 1.2044, 'grad_norm': 0.0020689801491733163, 'learning_rate': 0.12346574004677154, 'epoch': 0.44}
+ 231/520 [14:26<18:18, 3.80s/it] {'loss': 1.2656, 'grad_norm': 0.001838108342927907, 'learning_rate': 0.12285936081156897, 'epoch': 0.44}
+ 232/520 [14:30<18:07, 3.78s/it] {'loss': 1.4736, 'grad_norm': 0.002368163694036981, 'learning_rate': 0.12225209339563144, 'epoch': 0.45}
+ 233/520 [14:34<17:57, 3.75s/it] {'loss': 1.3544, 'grad_norm': 0.0021854434611733127, 'learning_rate': 0.12164396139381028, 'epoch': 0.45}
+ 234/520 [14:37<17:48, 3.74s/it] {'loss': 1.2109, 'grad_norm': 0.002088400740889016, 'learning_rate': 0.12103498843454959, 'epoch': 0.45}
+ 235/520 [14:41<17:37, 3.71s/it] {'loss': 1.2691, 'grad_norm': 0.002283492983056143, 'learning_rate': 0.12042519817896805, 'epoch': 0.45}
+ 236/520 [14:45<17:31, 3.70s/it] {'loss': 1.3772, 'grad_norm': 0.0019333656425862987, 'learning_rate': 0.11981461431993977, 'epoch': 0.45}
+ 237/520 [14:48<17:24, 3.69s/it] {'loss': 1.3451, 'grad_norm': 0.0019840549932750658, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 238/520 [14:52<17:15, 3.67s/it] {'loss': 1.2825, 'grad_norm': 0.002118221906912275, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 239/520 [14:56<17:11, 3.67s/it] {'loss': 1.378, 'grad_norm': 0.0020726179254046007, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 240/520 [14:59<17:10, 3.68s/it] {'loss': 1.1526, 'grad_norm': 0.002064808241549236, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 241/520 [15:03<17:05, 3.68s/it] {'loss': 1.2484, 'grad_norm': 0.0018953800284107034, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 242/520 [15:07<16:59, 3.67s/it] {'loss': 1.2628, 'grad_norm': 0.0018463525452865504, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 243/520 [15:10<16:55, 3.66s/it] {'loss': 1.2473, 'grad_norm': 0.001961030327451976, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 244/520 [15:14<16:51, 3.66s/it] {'loss': 1.3872, 'grad_norm': 0.0021670631243343296, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 245/520 [15:18<16:46, 3.66s/it] {'loss': 1.2322, 'grad_norm': 0.0019370905425231877, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 246/520 [15:21<16:43, 3.66s/it] {'loss': 1.4424, 'grad_norm': 0.002151116228169522, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 247/520 [15:25<16:37, 3.66s/it] {'loss': 1.422, 'grad_norm': 0.0020505840859278424, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 248/520 [15:29<16:40, 3.68s/it] {'loss': 1.2448, 'grad_norm': 0.0020087130857491704, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 249/520 [15:33<16:58, 3.76s/it] {'loss': 1.3477, 'grad_norm': 0.002049613808563701, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 250/520 [15:36<17:07, 3.81s/it] {'loss': 1.2812, 'grad_norm': 0.00212549381889212, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 251/520 [15:40<17:11, 3.84s/it] {'loss': 1.3452, 'grad_norm': 0.0018380417918588718, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 252/520 [15:44<17:11, 3.85s/it] {'loss': 1.3348, 'grad_norm': 0.0020462719380940153, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 253/520 [15:48<17:13, 3.87s/it] {'loss': 1.3466, 'grad_norm': 0.002287162683057668, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 254/520 [15:52<17:09, 3.87s/it] {'loss': 1.262, 'grad_norm': 0.0018670808117387493, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 255/520 [15:56<17:09, 3.89s/it] {'loss': 1.2722, 'grad_norm': 0.002216046638663969, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 256/520 [16:00<17:04, 3.88s/it] {'loss': 1.3217, 'grad_norm': 0.0020981342502172213, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 257/520 [16:04<17:02, 3.89s/it] {'loss': 1.3122, 'grad_norm': 0.0021213010093850218, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 258/520 [16:08<17:01, 3.90s/it] {'loss': 1.3239, 'grad_norm': 0.0018067031757229418, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 259/520 [16:11<16:52, 3.88s/it] {'loss': 1.3918, 'grad_norm': 0.0023343547329577824, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 260/520 [16:15<16:30, 3.81s/it] {'loss': 1.3987, 'grad_norm': 0.0018399532733083203, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 261/520 [16:19<16:14, 3.76s/it] {'loss': 1.3379, 'grad_norm': 0.002078732962045625, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 262/520 [16:22<16:02, 3.73s/it] {'loss': 1.23, 'grad_norm': 0.002086953232605071, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 263/520 [16:26<15:53, 3.71s/it] {'loss': 1.3422, 'grad_norm': 0.0020826000367226015, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 264/520 [16:30<15:46, 3.70s/it] {'loss': 1.3499, 'grad_norm': 0.0019861878523234106, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 265/520 [16:33<15:38, 3.68s/it] {'loss': 1.2493, 'grad_norm': 0.0023060387436996126, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 266/520 [16:37<15:30, 3.66s/it] {'loss': 1.1023, 'grad_norm': 0.0017601992800348122, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 267/520 [16:41<15:25, 3.66s/it] {'loss': 1.2484, 'grad_norm': 0.0019072524754587708, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 268/520 [16:44<15:19, 3.65s/it] {'loss': 1.4702, 'grad_norm': 0.002587637824287678, 'learning_rate': 0.1, 'epoch': 0.52}
+ 269/520 [16:48<15:19, 3.67s/it] {'loss': 1.359, 'grad_norm': 0.0020878487752088596, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 270/520 [16:52<15:14, 3.66s/it] {'loss': 1.2621, 'grad_norm': 0.0019275843098873688, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 271/520 [16:55<15:09, 3.65s/it] {'loss': 1.3512, 'grad_norm': 0.0020474539862080617, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 272/520 [16:59<15:04, 3.65s/it] {'loss': 1.2857, 'grad_norm': 0.0022057545705521683, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 273/520 [17:03<14:58, 3.64s/it] {'loss': 1.4193, 'grad_norm': 0.0028805260636044563, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
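The logged learning_rate traces the usual cosine-decay-with-linear-warmup curve: it passes exactly through 0.15, 0.1, and 0.05 at steps 184, 268, and 352, which is consistent with a peak of 2e-1 (the value in the run name), 520 total steps, and 16 warmup steps. A minimal sketch that reproduces the curve, assuming that standard schedule (the warmup length is inferred from the logged values, not stated anywhere in this log):

import math

# Cosine decay with linear warmup, as in e.g. Hugging Face's
# get_cosine_schedule_with_warmup. PEAK_LR matches the "2e-1" in the
# run name; WARMUP_STEPS = 16 is an inference from the logged values.
PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 2e-1, 16, 520

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear ramp-up
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# lr_at(184), lr_at(268), lr_at(352) evaluate to 0.15, 0.1, 0.05
# (up to float rounding), matching the values logged at those steps.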
+ 274/520 [17:06<14:53, 3.63s/it] {'loss': 1.3045, 'grad_norm': 0.0021785046647182496, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 275/520 [17:10<14:48, 3.63s/it] {'loss': 1.2491, 'grad_norm': 0.002107084263006542, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 276/520 [17:13<14:43, 3.62s/it] {'loss': 1.3333, 'grad_norm': 0.002194710617221743, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 277/520 [17:17<14:38, 3.62s/it] {'loss': 1.4022, 'grad_norm': 0.002074635719372922, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 278/520 [17:21<14:36, 3.62s/it] {'loss': 1.1954, 'grad_norm': 0.0019208332565282538, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 279/520 [17:24<14:33, 3.62s/it] {'loss': 1.3015, 'grad_norm': 0.0023808164594343166, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 280/520 [17:28<14:32, 3.63s/it] {'loss': 1.2568, 'grad_norm': 0.0022666505005937477, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 281/520 [17:32<14:27, 3.63s/it] {'loss': 1.3562, 'grad_norm': 0.0021190629846393446, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 282/520 [17:35<14:26, 3.64s/it] {'loss': 1.2134, 'grad_norm': 0.00186227048163908, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 283/520 [17:39<14:23, 3.64s/it] {'loss': 1.3813, 'grad_norm': 0.0021772826875909677, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
+ 284/520 [17:42<14:19, 3.64s/it] {'loss': 1.2709, 'grad_norm': 0.002235277734236088, 'learning_rate': 0.09004321534041836, 'epoch': 0.55}
+ 285/520 [17:46<14:20, 3.66s/it] {'loss': 1.2439, 'grad_norm': 0.0021009677494623676, 'learning_rate': 0.08942317838840624, 'epoch': 0.55}
+ 286/520 [17:50<14:17, 3.67s/it] {'loss': 1.1076, 'grad_norm': 0.002046703164585016, 'learning_rate': 0.08880355238966922, 'epoch': 0.55}
+ 287/520 [17:54<14:13, 3.66s/it] {'loss': 1.3551, 'grad_norm': 0.0021847135635197842, 'learning_rate': 0.08818436141924073, 'epoch': 0.55}
+ 288/520 [17:57<14:13, 3.68s/it] {'loss': 1.3985, 'grad_norm': 0.002057218976809136, 'learning_rate': 0.08756562953525152, 'epoch': 0.55}
+ 289/520 [18:01<14:10, 3.68s/it] {'loss': 1.2579, 'grad_norm': 0.00195726808555543, 'learning_rate': 0.08694738077799487, 'epoch': 0.56}
+ 290/520 [18:05<14:07, 3.68s/it] {'loss': 1.1702, 'grad_norm': 0.0018733493864803003, 'learning_rate': 0.08632963916899268, 'epoch': 0.56}
+ 291/520 [18:08<14:06, 3.70s/it] {'loss': 1.2367, 'grad_norm': 0.0021875009558342934, 'learning_rate': 0.08571242871006202, 'epoch': 0.56}
+ 292/520 [18:12<14:01, 3.69s/it] {'loss': 1.2825, 'grad_norm': 0.001974351797347558, 'learning_rate': 0.08509577338238256, 'epoch': 0.56}
+ 293/520 [18:16<13:57, 3.69s/it] {'loss': 1.2234, 'grad_norm': 0.0021628130450235517, 'learning_rate': 0.08447969714556484, 'epoch': 0.56}
+ 294/520 [18:19<13:52, 3.69s/it] {'loss': 1.249, 'grad_norm': 0.0021323259627889315, 'learning_rate': 0.08386422393671933, 'epoch': 0.57}
+ 295/520 [18:23<13:46, 3.67s/it] {'loss': 1.3493, 'grad_norm': 0.0023851109114566126, 'learning_rate': 0.08324937766952638, 'epoch': 0.57}
+ 296/520 [18:27<13:44, 3.68s/it] {'loss': 1.1971, 'grad_norm': 0.0022738828293823276, 'learning_rate': 0.08263518223330697, 'epoch': 0.57}
+ 297/520 [18:30<13:38, 3.67s/it] {'loss': 1.329, 'grad_norm': 0.002213135444637977, 'learning_rate': 0.08202166149209474, 'epoch': 0.57}
+ 298/520 [18:34<13:33, 3.66s/it] {'loss': 1.2867, 'grad_norm': 0.0017486268032926825, 'learning_rate': 0.08140883928370855, 'epoch': 0.57}
+ 299/520 [18:38<13:28, 3.66s/it] {'loss': 1.348, 'grad_norm': 0.0019150967582474267, 'learning_rate': 0.0807967394188264, 'epoch': 0.57}
+ 300/520 [18:41<13:23, 3.65s/it] {'loss': 1.3442, 'grad_norm': 0.0020529690682301485, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 301/520 [18:45<13:20, 3.65s/it] {'loss': 1.3167, 'grad_norm': 0.002003386990234586, 'learning_rate': 0.07957480182103199, 'epoch': 0.58}
+ 302/520 [18:49<13:16, 3.66s/it] {'loss': 1.3707, 'grad_norm': 0.0019853794894479737, 'learning_rate': 0.07896501156545044, 'epoch': 0.58}
+ 303/520 [18:52<13:13, 3.66s/it] {'loss': 1.2489, 'grad_norm': 0.002236388409222696, 'learning_rate': 0.07835603860618973, 'epoch': 0.58}
+ 304/520 [18:56<13:08, 3.65s/it] {'loss': 1.2637, 'grad_norm': 0.002088915136574477, 'learning_rate': 0.07774790660436857, 'epoch': 0.58}
+ 305/520 [19:00<13:07, 3.66s/it] {'loss': 1.3601, 'grad_norm': 0.002332449797510248, 'learning_rate': 0.07714063918843106, 'epoch': 0.59}
+ 306/520 [19:03<13:03, 3.66s/it] {'loss': 1.3031, 'grad_norm': 0.002047277336076224, 'learning_rate': 0.0765342599532285, 'epoch': 0.59}
+ 307/520 [19:07<13:22, 3.77s/it] {'loss': 1.2337, 'grad_norm': 0.001849619643305715, 'learning_rate': 0.07592879245910272, 'epoch': 0.59}
+ 308/520 [19:11<13:11, 3.73s/it] {'loss': 1.3547, 'grad_norm': 0.001986673679425375, 'learning_rate': 0.07532426023097064, 'epoch': 0.59}
+ 309/520 [19:15<13:07, 3.73s/it] {'loss': 1.2293, 'grad_norm': 0.0019202798700388225, 'learning_rate': 0.07472068675741024, 'epoch': 0.59}
+ 310/520 [19:18<12:57, 3.70s/it] {'loss': 1.2058, 'grad_norm': 0.001927408476785842, 'learning_rate': 0.07411809548974792, 'epoch': 0.6}
+ 311/520 [19:22<12:51, 3.69s/it] {'loss': 1.184, 'grad_norm': 0.0019522642089678452, 'learning_rate': 0.07351650984114727, 'epoch': 0.6}
+ 312/520 [19:26<12:46, 3.69s/it] {'loss': 1.1694, 'grad_norm': 0.0020822010297871316, 'learning_rate': 0.0729159531856995, 'epoch': 0.6}
+ 313/520 [19:29<12:42, 3.68s/it] {'loss': 1.1607, 'grad_norm': 0.0017792607381336301, 'learning_rate': 0.07231644885751508, 'epoch': 0.6}
+ 314/520 [19:33<13:04, 3.81s/it] {'loss': 1.205, 'grad_norm': 0.001813552283913022, 'learning_rate': 0.07171802014981725, 'epoch': 0.6}
+ 315/520 [19:37<12:50, 3.76s/it] {'loss': 1.3241, 'grad_norm': 0.0025754575757042475, 'learning_rate': 0.07112069031403703, 'epoch': 0.61}
+ 316/520 [19:41<13:04, 3.85s/it] {'loss': 1.1741, 'grad_norm': 0.0023725922071501275, 'learning_rate': 0.07052448255890957, 'epoch': 0.61}
+ 317/520 [19:45<12:49, 3.79s/it] {'loss': 1.1982, 'grad_norm': 0.0018576257874043205, 'learning_rate': 0.0699294200495727, 'epoch': 0.61}
+ 318/520 [19:48<12:39, 3.76s/it] {'loss': 1.3222, 'grad_norm': 0.0021871476013983937, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 319/520 [19:53<12:55, 3.86s/it] {'loss': 1.1797, 'grad_norm': 0.002015440448896753, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 320/520 [19:56<12:40, 3.80s/it] {'loss': 1.129, 'grad_norm': 0.0020815269553611216, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 321/520 [20:00<12:26, 3.75s/it] {'loss': 1.3297, 'grad_norm': 0.0021107068842846233, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 322/520 [20:04<12:18, 3.73s/it] {'loss': 1.1947, 'grad_norm': 0.0019414339010874427, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 323/520 [20:07<12:10, 3.71s/it] {'loss': 1.2665, 'grad_norm': 0.0020781343592536014, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 324/520 [20:11<12:02, 3.68s/it] {'loss': 1.2593, 'grad_norm': 0.002066687117165876, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 325/520 [20:14<11:56, 3.68s/it] {'loss': 1.2724, 'grad_norm': 0.0020676091886885745, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 326/520 [20:18<11:50, 3.66s/it] {'loss': 1.2517, 'grad_norm': 0.001958426603765309, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 327/520 [20:22<11:43, 3.65s/it] {'loss': 1.3371, 'grad_norm': 0.002430653605513889, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 328/520 [20:25<11:39, 3.64s/it] {'loss': 1.3217, 'grad_norm': 0.002113219321831629, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 329/520 [20:29<11:36, 3.64s/it] {'loss': 1.176, 'grad_norm': 0.001767275764543658, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 330/520 [20:33<11:31, 3.64s/it] {'loss': 1.2536, 'grad_norm': 0.0017734381478109727, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 331/520 [20:36<11:28, 3.64s/it] {'loss': 1.2087, 'grad_norm': 0.001861997309551269, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 332/520 [20:40<11:24, 3.64s/it] {'loss': 1.3443, 'grad_norm': 0.0018888061532447741, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 333/520 [20:44<11:21, 3.64s/it] {'loss': 1.3728, 'grad_norm': 0.0021856274083771367, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 334/520 [20:47<11:20, 3.66s/it] {'loss': 1.2606, 'grad_norm': 0.0022817386819419545, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 335/520 [20:51<11:20, 3.68s/it] {'loss': 1.2558, 'grad_norm': 0.001810850561441856, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 336/520 [20:55<11:17, 3.68s/it] {'loss': 1.1446, 'grad_norm': 0.002129303373480683, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 337/520 [20:58<11:14, 3.69s/it] {'loss': 1.1389, 'grad_norm': 0.0019405016807021208, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 338/520 [21:02<11:10, 3.69s/it] {'loss': 1.2692, 'grad_norm': 0.002013974033757598, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 339/520 [21:06<11:10, 3.70s/it] {'loss': 1.2102, 'grad_norm': 0.00200485234008405, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 340/520 [21:09<11:06, 3.70s/it] {'loss': 1.2019, 'grad_norm': 0.001989372413983426, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 341/520 [21:13<11:00, 3.69s/it] {'loss': 1.2217, 'grad_norm': 0.0020664681237500416, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 342/520 [21:17<10:54, 3.67s/it] {'loss': 1.3068, 'grad_norm': 0.0025071725959970367, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 343/520 [21:20<10:50, 3.67s/it] {'loss': 1.2718, 'grad_norm': 0.002012047087173804, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 344/520 [21:24<10:57, 3.73s/it] {'loss': 1.1689, 'grad_norm': 0.0020113354758574164, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
+ 345/520 [21:28<11:00, 3.77s/it] {'loss': 1.288, 'grad_norm': 0.0022546570617351497, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 346/520 [21:32<11:01, 3.80s/it] {'loss': 1.2702, 'grad_norm': 0.0019079908928060605, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 347/520 [21:36<11:03, 3.83s/it] {'loss': 1.1888, 'grad_norm': 0.0018422267699457005, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+ Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 348/520 [21:40<11:02, 3.85s/it] {'loss': 1.1482, 'grad_norm': 0.002299605879484128, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 349/520 [21:44<10:58, 3.85s/it] {'loss': 1.1912, 'grad_norm': 0.002081041312916175, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 350/520 [21:48<10:57, 3.87s/it] {'loss': 1.2332, 'grad_norm': 0.002054013045128589, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 351/520 [21:51<10:53, 3.87s/it] {'loss': 1.14, 'grad_norm': 0.0018549131302556985, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 352/520 [21:55<10:48, 3.86s/it] {'loss': 1.2619, 'grad_norm': 0.0019386036783142933, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 353/520 [21:59<10:37, 3.81s/it] {'loss': 1.2168, 'grad_norm': 0.0016524444596531034, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 354/520 [22:03<10:25, 3.77s/it] {'loss': 1.3576, 'grad_norm': 0.0019518731779614871, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 355/520 [22:06<10:17, 3.74s/it] {'loss': 1.1988, 'grad_norm': 0.0019912762631046892, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 356/520 [22:10<10:11, 3.73s/it] {'loss': 1.1976, 'grad_norm': 0.0019312104007101188, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 357/520 [22:14<10:05, 3.71s/it] {'loss': 1.2274, 'grad_norm': 0.0018112691351552818, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 358/520 [22:17<09:59, 3.70s/it] {'loss': 1.1545, 'grad_norm': 0.0019454836859586495, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 359/520 [22:21<09:54, 3.69s/it] {'loss': 1.285, 'grad_norm': 0.0021811704303208237, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 360/520 [22:25<09:50, 3.69s/it] {'loss': 1.3085, 'grad_norm': 0.002058434360054392, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 361/520 [22:28<09:45, 3.68s/it] {'loss': 1.2993, 'grad_norm': 0.0018022795872335653, 'learning_rate': 0.04522281607821288, 'epoch': 0.69}
+ 362/520 [22:32<09:42, 3.68s/it] {'loss': 1.2211, 'grad_norm': 0.002124205300715167, 'learning_rate': 0.04470238686320606, 'epoch': 0.7}
+ 363/520 [22:36<09:42, 3.71s/it] {'loss': 1.2377, 'grad_norm': 0.0018897894325927513, 'learning_rate': 0.044184106189231624, 'epoch': 0.7}
+ 364/520 [22:40<09:49, 3.78s/it] {'loss': 1.3133, 'grad_norm': 0.001985435101888764, 'learning_rate': 0.043667994193637795, 'epoch': 0.7}
+ 365/520 [22:44<09:49, 3.80s/it] {'loss': 1.3037, 'grad_norm': 0.002045469910544275, 'learning_rate': 0.043154070929510784, 'epoch': 0.7}
+ 366/520 [22:48<09:49, 3.83s/it] {'loss': 1.2538, 'grad_norm': 0.0018816686022775206, 'learning_rate': 0.04264235636489542, 'epoch': 0.7}
+ 367/520 [22:51<09:47, 3.84s/it] {'loss': 1.2465, 'grad_norm': 0.0019132955130137218, 'learning_rate': 0.04213287038201943, 'epoch': 0.71}
+ 368/520 [22:55<09:46, 3.86s/it] {'loss': 1.1052, 'grad_norm': 0.0019881669980730235, 'learning_rate': 0.04162563277652104, 'epoch': 0.71}
+ 369/520 [22:59<09:44, 3.87s/it] {'loss': 1.2622, 'grad_norm': 0.0017901862444656025, 'learning_rate': 0.04112066325667954, 'epoch': 0.71}
+ 370/520 [23:03<09:40, 3.87s/it] {'loss': 1.165, 'grad_norm': 0.001775462451934345, 'learning_rate': 0.04061798144264986, 'epoch': 0.71}
+ 371/520 [23:07<09:38, 3.88s/it] {'loss': 1.1661, 'grad_norm': 0.002067286517468023, 'learning_rate': 0.04011760686569998, 'epoch': 0.71}
+ 372/520 [23:11<09:35, 3.89s/it] {'loss': 1.3547, 'grad_norm': 0.0017778618506596934, 'learning_rate': 0.03961955896745224, 'epoch': 0.72}
+ 373/520 [23:15<09:29, 3.87s/it] {'loss': 1.2353, 'grad_norm': 0.002072110557150221, 'learning_rate': 0.03912385709912794, 'epoch': 0.72}
+ 374/520 [23:18<09:17, 3.82s/it] {'loss': 1.2474, 'grad_norm': 0.001914559738347956, 'learning_rate': 0.038630520520795276, 'epoch': 0.72}
+ 375/520 [23:22<09:10, 3.80s/it] {'loss': 1.1608, 'grad_norm': 0.0019640544533290776, 'learning_rate': 0.03813956840062119, 'epoch': 0.72}
+ 376/520 [23:26<09:04, 3.78s/it] {'loss': 1.2787, 'grad_norm': 0.00177644586330719, 'learning_rate': 0.037651019814126656, 'epoch': 0.72}
+ 377/520 [23:30<08:55, 3.74s/it] {'loss': 1.2201, 'grad_norm': 0.0019554705221795756, 'learning_rate': 0.037164893743445275, 'epoch': 0.72}
+ 378/520 [23:33<08:47, 3.71s/it] {'loss': 1.2739, 'grad_norm': 0.0019129728744286662, 'learning_rate': 0.03668120907658603, 'epoch': 0.73}
+ 379/520 [23:37<08:43, 3.71s/it] {'loss': 1.2505, 'grad_norm': 0.0018400171188818425, 'learning_rate': 0.036199984606699154, 'epoch': 0.73}
+ 380/520 [23:41<08:37, 3.70s/it] {'loss': 1.3281, 'grad_norm': 0.0020878171632242055, 'learning_rate': 0.035721239031346066, 'epoch': 0.73}
+ 381/520 [23:44<08:32, 3.69s/it] {'loss': 1.2477, 'grad_norm': 0.001907053766748275, 'learning_rate': 0.03524499095177297, 'epoch': 0.73}
+ 382/520 [23:48<08:28, 3.69s/it] {'loss': 1.2809, 'grad_norm': 0.0019398224909793046, 'learning_rate': 0.03477125887218792, 'epoch': 0.73}
+ 383/520 [23:52<08:23, 3.67s/it] {'loss': 1.0855, 'grad_norm': 0.0020136212979912737, 'learning_rate': 0.03430006119904196, 'epoch': 0.74}
+ 384/520 [23:55<08:19, 3.68s/it] {'loss': 1.3603, 'grad_norm': 0.0019492286806057069, 'learning_rate': 0.033831416240314084, 'epoch': 0.74}
+ 385/520 [23:59<08:15, 3.67s/it] {'loss': 1.2272, 'grad_norm': 0.001817009381047559, 'learning_rate': 0.03336534220479961, 'epoch': 0.74}
+ 386/520 [24:03<08:12, 3.67s/it] {'loss': 1.1742, 'grad_norm': 0.001676042556394445, 'learning_rate': 0.032901857201403005, 'epoch': 0.74}
+ 387/520 [24:06<08:09, 3.68s/it] {'loss': 1.3525, 'grad_norm': 0.0018644994212294833, 'learning_rate': 0.032440979238433976, 'epoch': 0.74}
+ 388/520 [24:10<08:04, 3.67s/it] {'loss': 1.1264, 'grad_norm': 0.001793201788338274, 'learning_rate': 0.03198272622290804, 'epoch': 0.75}
+ 389/520 [24:14<08:00, 3.67s/it] {'loss': 1.1833, 'grad_norm': 0.002190190261787149, 'learning_rate': 0.03152711595985065, 'epoch': 0.75}
+ 390/520 [24:17<07:57, 3.67s/it] {'loss': 1.2485, 'grad_norm': 0.0018444980643636606, 'learning_rate': 0.031074166151605298, 'epoch': 0.75}
+ 391/520 [24:21<07:54, 3.68s/it] {'loss': 1.3287, 'grad_norm': 0.0020026021501690383, 'learning_rate': 0.030623894397145836, 'epoch': 0.75}
+ 392/520 [24:25<07:50, 3.68s/it] {'loss': 1.1342, 'grad_norm': 0.0018107786722780731, 'learning_rate': 0.03017631819139273, 'epoch': 0.75}
+ 393/520 [24:28<07:47, 3.68s/it] {'loss': 1.1721, 'grad_norm': 0.001683427940643862, 'learning_rate': 0.029731454924533086, 'epoch': 0.76}
+ 394/520 [24:32<07:42, 3.67s/it] {'loss': 1.1999, 'grad_norm': 0.002102266492346685, 'learning_rate': 0.029289321881345254, 'epoch': 0.76}
+ 395/520 [24:36<07:39, 3.68s/it] {'loss': 1.1616, 'grad_norm': 0.002199979642029788, 'learning_rate': 0.028849936240527008, 'epoch': 0.76}
+ 396/520 [24:39<07:35, 3.67s/it] {'loss': 1.2465, 'grad_norm': 0.0019552365378068593, 'learning_rate': 0.028413315074028157, 'epoch': 0.76}
+ 397/520 [24:43<07:32, 3.68s/it] {'loss': 1.2316, 'grad_norm': 0.0018023154179966827, 'learning_rate': 0.027979475346387363, 'epoch': 0.76}
+ 398/520 [24:47<07:32, 3.71s/it] {'loss': 1.2266, 'grad_norm': 0.001937398908117515, 'learning_rate': 0.027548433914072735, 'epoch': 0.77}
+ 399/520 [24:51<07:32, 3.74s/it] {'loss': 1.216, 'grad_norm': 0.0018501726728092506, 'learning_rate': 0.027120207524827168, 'epoch': 0.77}
+ 400/520 [24:54<07:31, 3.76s/it] {'loss': 1.2602, 'grad_norm': 0.0018143898409461578, 'learning_rate': 0.02669481281701739, 'epoch': 0.77}
+ 401/520 [24:58<07:29, 3.78s/it] {'loss': 1.0518, 'grad_norm': 0.00199555776390757, 'learning_rate': 0.026272266318987603, 'epoch': 0.77}
+ 402/520 [25:02<07:26, 3.78s/it] {'loss': 1.1732, 'grad_norm': 0.0020223985443566968, 'learning_rate': 0.02585258444841733, 'epoch': 0.77}
+ 403/520 [25:06<07:23, 3.79s/it] {'loss': 1.2048, 'grad_norm': 0.002166919830864113, 'learning_rate': 0.025435783511683442, 'epoch': 0.78}
+ 404/520 [25:10<07:20, 3.80s/it] {'loss': 1.1065, 'grad_norm': 0.0023288988552994587, 'learning_rate': 0.02502187970322657, 'epoch': 0.78}
+ 405/520 [25:14<07:18, 3.81s/it] {'loss': 1.2218, 'grad_norm': 0.001851532027572708, 'learning_rate': 0.02461088910492202, 'epoch': 0.78}
+ 406/520 [25:17<07:14, 3.82s/it] {'loss': 1.1509, 'grad_norm': 0.0022227464287857095, 'learning_rate': 0.02420282768545469, 'epoch': 0.78}
+ 407/520 [25:21<07:10, 3.81s/it] {'loss': 1.2958, 'grad_norm': 0.0019451104590252324, 'learning_rate': 0.02379771129969892, 'epoch': 0.78}
+ 408/520 [25:25<07:08, 3.82s/it] {'loss': 1.1879, 'grad_norm': 0.0020171256533495683, 'learning_rate': 0.023395555688102213, 'epoch': 0.78}
+ 409/520 [25:29<07:03, 3.82s/it] {'loss': 1.3162, 'grad_norm': 0.002140531057362246, 'learning_rate': 0.02299637647607372, 'epoch': 0.79}
+ 410/520 [25:33<07:00, 3.82s/it] {'loss': 1.0359, 'grad_norm': 0.0018552556283979687, 'learning_rate': 0.022600189173377264, 'epoch': 0.79}
+ 411/520 [25:37<06:56, 3.82s/it] {'loss': 1.2979, 'grad_norm': 0.0022468014942204507, 'learning_rate': 0.022207009173528525, 'epoch': 0.79}
+ 412/520 [25:40<06:52, 3.82s/it] {'loss': 1.2055, 'grad_norm': 0.0019086448558400233, 'learning_rate': 0.02181685175319702, 'epoch': 0.79}
+ 413/520 [25:44<06:48, 3.82s/it] {'loss': 1.2405, 'grad_norm': 0.0018154740072654741, 'learning_rate': 0.021429732071612653, 'epoch': 0.79}
+ 414/520 [25:48<06:46, 3.83s/it] {'loss': 1.0465, 'grad_norm': 0.00172328582896455, 'learning_rate': 0.02104566516997647, 'epoch': 0.8}
+ 415/520 [25:52<06:42, 3.83s/it] {'loss': 1.1777, 'grad_norm': 0.0018860027602833986, 'learning_rate': 0.020664665970876496, 'epoch': 0.8}
+ 416/520 [25:56<06:38, 3.83s/it] {'loss': 1.1004, 'grad_norm': 0.002330219603623276, 'learning_rate': 0.020286749277707784, 'epoch': 0.8}
+ 417/520 [25:59<06:33, 3.82s/it] {'loss': 1.2624, 'grad_norm': 0.002178428332506809, 'learning_rate': 0.019911929774097215, 'epoch': 0.8}
+ 418/520 [26:03<06:29, 3.82s/it] {'loss': 1.2464, 'grad_norm': 0.00189244062433382, 'learning_rate': 0.019540222023333165, 'epoch': 0.8}
+ 419/520 [26:07<06:24, 3.80s/it] {'loss': 1.2346, 'grad_norm': 0.00201978492434637, 'learning_rate': 0.01917164046779948, 'epoch': 0.81}
+ 420/520 [26:11<06:19, 3.80s/it] {'loss': 1.1238, 'grad_norm': 0.002033377208293811, 'learning_rate': 0.018806199428414352, 'epoch': 0.81}
+ 421/520 [26:15<06:15, 3.79s/it] {'loss': 1.0542, 'grad_norm': 0.002116471076569466, 'learning_rate': 0.018443913104073985, 'epoch': 0.81}
+ 422/520 [26:18<06:07, 3.75s/it] {'loss': 1.1781, 'grad_norm': 0.002005497381841075, 'learning_rate': 0.01808479557110081, 'epoch': 0.81}
+ 423/520 [26:22<06:00, 3.71s/it] {'loss': 1.1644, 'grad_norm': 0.0022573479522653784, 'learning_rate': 0.017728860782696667, 'epoch': 0.81}
+ 424/520 [26:26<05:55, 3.70s/it] {'loss': 1.3295, 'grad_norm': 0.0020966940466947853, 'learning_rate': 0.017376122568400532, 'epoch': 0.82}
+ 425/520 [26:29<05:50, 3.69s/it] {'loss': 1.1715, 'grad_norm': 0.001851887996933743, 'learning_rate': 0.017026594633551252, 'epoch': 0.82}
+ 426/520 [26:33<05:47, 3.70s/it] {'loss': 1.1951, 'grad_norm': 0.002577879636513327, 'learning_rate': 0.01668029055875512, 'epoch': 0.82}
+ 427/520 [26:37<05:43, 3.70s/it] {'loss': 1.1075, 'grad_norm': 0.0018646251809529568, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 428/520 [26:40<05:39, 3.69s/it] {'loss': 1.0832, 'grad_norm': 0.001963297415986919, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 429/520 [26:44<05:35, 3.68s/it] {'loss': 1.1841, 'grad_norm': 0.0018816614318905662, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
+ Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
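This message (like the earlier one after step 347) is the standard Hugging Face tokenizers warning: an encoded example came out longer than the tokenizer's model_max_length, the 2048 in the parenthetical. It fires at encoding time and is harmless as long as sequences are truncated before the forward pass. A minimal sketch of what triggers and silences it, assuming a plain transformers tokenizer; the model name and text below are illustrative, not taken from the training data:

from transformers import AutoTokenizer

# model_max_length=2048 mirrors the limit quoted in the warning above.
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048)
long_text = "word " * 3000
ids = tok(long_text).input_ids                    # > 2048 tokens: emits the warning
ids = tok(long_text, truncation=True).input_ids   # clipped to 2048, no warning
assert len(ids) <= 2048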
+ 430/520 [26:48<05:31, 3.68s/it] {'loss': 1.1834, 'grad_norm': 0.0017419214960373048, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 431/520 [26:51<05:27, 3.68s/it] {'loss': 1.2103, 'grad_norm': 0.0019651905841702286, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 432/520 [26:55<05:23, 3.67s/it] {'loss': 1.093, 'grad_norm': 0.0020241909494257662, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 433/520 [26:59<05:18, 3.66s/it] {'loss': 1.2259, 'grad_norm': 0.001905936319297456, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 434/520 [27:02<05:15, 3.67s/it] {'loss': 0.9657, 'grad_norm': 0.001872709986899173, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 435/520 [27:06<05:11, 3.66s/it] {'loss': 1.263, 'grad_norm': 0.002305091711655817, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 436/520 [27:10<05:06, 3.65s/it] {'loss': 1.0541, 'grad_norm': 0.0019223437261483955, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 437/520 [27:13<05:02, 3.65s/it] {'loss': 1.2894, 'grad_norm': 0.0019081820680133053, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 438/520 [27:17<04:59, 3.65s/it] {'loss': 1.0963, 'grad_norm': 0.001924662338888528, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 439/520 [27:21<04:55, 3.65s/it] {'loss': 1.1825, 'grad_norm': 0.0015998748813333518, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 440/520 [27:24<04:52, 3.65s/it] {'loss': 1.1443, 'grad_norm': 0.0020409036217333824, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 441/520 [27:28<04:49, 3.66s/it] {'loss': 1.2085, 'grad_norm': 0.0018684365348363798, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 442/520 [27:31<04:44, 3.64s/it] {'loss': 1.2047, 'grad_norm': 0.0021647629025678614, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 443/520 [27:35<04:40, 3.65s/it] {'loss': 1.2177, 'grad_norm': 0.001910077439021275, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 444/520 [27:39<04:37, 3.65s/it] {'loss': 1.1856, 'grad_norm': 0.001719494286694497, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 445/520 [27:42<04:34, 3.66s/it] {'loss': 1.1082, 'grad_norm': 0.0018501241143156484, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 446/520 [27:46<04:30, 3.66s/it] {'loss': 1.2868, 'grad_norm': 0.0018086568499141298, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 447/520 [27:50<04:27, 3.67s/it] {'loss': 1.1935, 'grad_norm': 0.0018762291839450192, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 448/520 [27:53<04:23, 3.66s/it] {'loss': 1.1728, 'grad_norm': 0.002058930284061775, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 449/520 [27:57<04:20, 3.67s/it] {'loss': 1.2427, 'grad_norm': 0.00192490501539442, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 450/520 [28:01<04:16, 3.66s/it] {'loss': 1.2126, 'grad_norm': 0.0019245333416628737, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 451/520 [28:04<04:13, 3.67s/it] {'loss': 1.2092, 'grad_norm': 0.0019984656762084066, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 452/520 [28:08<04:08, 3.66s/it] {'loss': 1.2807, 'grad_norm': 0.0018137022240154864, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 453/520 [28:12<04:05, 3.66s/it] {'loss': 1.26, 'grad_norm': 0.0018407564241755933, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 454/520 [28:15<04:02, 3.67s/it] {'loss': 1.1148, 'grad_norm': 0.001989927617480078, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 455/520 [28:19<03:58, 3.67s/it] {'loss': 1.2572, 'grad_norm': 0.001901510616760063, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 456/520 [28:23<03:54, 3.66s/it] {'loss': 1.1722, 'grad_norm': 0.0019390960068964268, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 457/520 [28:26<03:50, 3.66s/it] {'loss': 1.1942, 'grad_norm': 0.0017872323673568458, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 458/520 [28:30<03:46, 3.66s/it] {'loss': 1.3158, 'grad_norm': 0.002019768235531703, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 459/520 [28:34<03:43, 3.66s/it] {'loss': 1.2489, 'grad_norm': 0.0019293883505218455, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 460/520 [28:37<03:39, 3.67s/it] {'loss': 1.1234, 'grad_norm': 0.001882612771584406, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 461/520 [28:41<03:36, 3.67s/it] {'loss': 1.2792, 'grad_norm': 0.0016475037999539315, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 462/520 [28:45<03:33, 3.68s/it] {'loss': 1.3256, 'grad_norm': 0.0018618116912645986, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 463/520 [28:49<03:30, 3.69s/it] {'loss': 1.0807, 'grad_norm': 0.00203131489140421, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 464/520 [28:52<03:28, 3.73s/it] {'loss': 1.2282, 'grad_norm': 0.0019575505670996664, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
0.0019575505670996664, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [28:52<03:28, 3.73s/it] 89%|████████▉ | 465/520 [28:56<03:27, 3.78s/it] {'loss': 1.3382, 'grad_norm': 0.002122553043383435, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [28:56<03:27, 3.78s/it] 90%|████████▉ | 466/520 [29:00<03:25, 3.81s/it] {'loss': 1.2093, 'grad_norm': 0.0017416253374240268, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:00<03:25, 3.81s/it] 90%|████████▉ | 467/520 [29:04<03:23, 3.84s/it] {'loss': 1.218, 'grad_norm': 0.0018411929402962803, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:04<03:23, 3.84s/it] 90%|█████████ | 468/520 [29:08<03:21, 3.87s/it] {'loss': 1.1929, 'grad_norm': 0.00210723553415649, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:08<03:21, 3.87s/it] 90%|█████████ | 469/520 [29:12<03:17, 3.88s/it] {'loss': 1.245, 'grad_norm': 0.0020224287543567586, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:12<03:17, 3.88s/it] 90%|█████████ | 470/520 [29:16<03:11, 3.83s/it] {'loss': 1.1229, 'grad_norm': 0.0017048737346084986, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:16<03:11, 3.83s/it] 91%|█████████ | 471/520 [29:19<03:05, 3.78s/it] {'loss': 1.1474, 'grad_norm': 0.0019388425866569414, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:19<03:05, 3.78s/it] 91%|█████████ | 472/520 [29:23<03:00, 3.76s/it] {'loss': 1.117, 'grad_norm': 0.0018511009978439917, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:23<03:00, 3.76s/it] 91%|█████████ | 473/520 [29:27<02:55, 3.73s/it] {'loss': 1.1785, 'grad_norm': 0.0019471020766078481, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:27<02:55, 3.73s/it] 91%|█████████ | 474/520 [29:30<02:51, 3.72s/it] {'loss': 1.2521, 'grad_norm': 0.0018194154657788415, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:30<02:51, 3.72s/it] 91%|█████████▏| 475/520 [29:34<02:46, 3.71s/it] {'loss': 1.1632, 'grad_norm': 0.0017588843849701235, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:34<02:46, 3.71s/it] 92%|█████████▏| 476/520 [29:38<02:42, 3.70s/it] {'loss': 1.171, 'grad_norm': 0.0019320457510808057, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:38<02:42, 3.70s/it] 92%|█████████▏| 477/520 [29:41<02:39, 3.70s/it] {'loss': 1.1566, 'grad_norm': 0.002093323848584535, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:41<02:39, 3.70s/it] 92%|█████████▏| 478/520 [29:45<02:35, 3.70s/it] {'loss': 1.1162, 'grad_norm': 0.0018716071741799126, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:45<02:35, 3.70s/it] 92%|█████████▏| 479/520 [29:49<02:31, 3.70s/it] {'loss': 1.2151, 'grad_norm': 0.0019702120781953728, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [29:49<02:31, 3.70s/it] 92%|█████████▏| 480/520 [29:52<02:28, 3.70s/it] {'loss': 1.2429, 'grad_norm': 0.0017960575134332165, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [29:52<02:28, 3.70s/it] 92%|█████████▎| 481/520 [29:56<02:25, 3.72s/it] {'loss': 1.2381, 'grad_norm': 0.0017845206207169097, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 
92%|█████████▎| 481/520 [29:56<02:25, 3.72s/it] 93%|█████████▎| 482/520 [30:00<02:21, 3.73s/it] {'loss': 1.2494, 'grad_norm': 0.00201080836342005, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:00<02:21, 3.73s/it] 93%|█████████▎| 483/520 [30:04<02:17, 3.72s/it] {'loss': 1.1848, 'grad_norm': 0.00204772337300711, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:04<02:17, 3.72s/it] 93%|█████████▎| 484/520 [30:07<02:13, 3.70s/it] {'loss': 1.1913, 'grad_norm': 0.0019126737435090787, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:07<02:13, 3.70s/it] 93%|█████████▎| 485/520 [30:11<02:11, 3.76s/it] {'loss': 1.136, 'grad_norm': 0.0018174781440361042, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:11<02:11, 3.76s/it] 93%|█████████▎| 486/520 [30:15<02:08, 3.79s/it] {'loss': 1.2613, 'grad_norm': 0.0020100629860649298, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:15<02:08, 3.79s/it] 94%|█████████▎| 487/520 [30:19<02:06, 3.82s/it] {'loss': 1.1122, 'grad_norm': 0.0017938758978255874, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:19<02:06, 3.82s/it] 94%|█████████▍| 488/520 [30:23<02:02, 3.83s/it] {'loss': 1.0584, 'grad_norm': 0.0018958165013521755, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:23<02:02, 3.83s/it] 94%|█████████▍| 489/520 [30:27<01:59, 3.84s/it] {'loss': 1.2419, 'grad_norm': 0.0016856627754315641, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:27<01:59, 3.84s/it] 94%|█████████▍| 490/520 [30:31<01:55, 3.84s/it] {'loss': 1.183, 'grad_norm': 0.0020646981426732055, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:31<01:55, 3.84s/it] 94%|█████████▍| 491/520 [30:34<01:51, 3.84s/it] {'loss': 1.1352, 'grad_norm': 0.0019310752415974736, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:34<01:51, 3.84s/it] 95%|█████████▍| 492/520 [30:38<01:47, 3.84s/it] {'loss': 1.2638, 'grad_norm': 0.002035624252208936, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:38<01:47, 3.84s/it] 95%|█████████▍| 493/520 [30:42<01:43, 3.84s/it] {'loss': 1.2836, 'grad_norm': 0.0020066193904996962, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:42<01:43, 3.84s/it] 95%|█████████▌| 494/520 [30:46<01:39, 3.84s/it] {'loss': 1.202, 'grad_norm': 0.0018134426920423273, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:46<01:39, 3.84s/it] 95%|█████████▌| 495/520 [30:50<01:35, 3.84s/it] {'loss': 1.1591, 'grad_norm': 0.0018572901750543637, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [30:50<01:35, 3.84s/it] 95%|█████████▌| 496/520 [30:54<01:32, 3.85s/it] {'loss': 1.0751, 'grad_norm': 0.001902929657242329, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [30:54<01:32, 3.85s/it] 96%|█████████▌| 497/520 [30:57<01:28, 3.86s/it] {'loss': 1.185, 'grad_norm': 0.001690966770810112, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [30:58<01:28, 3.86s/it] 96%|█████████▌| 498/520 [31:01<01:24, 3.85s/it] {'loss': 1.1598, 'grad_norm': 0.001998640165690312, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:01<01:24, 3.85s/it] 96%|█████████▌| 
499/520 [31:05<01:20, 3.83s/it] {'loss': 1.3169, 'grad_norm': 0.0019401735975300698, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:05<01:20, 3.83s/it] 96%|█████████▌| 500/520 [31:09<01:15, 3.78s/it] {'loss': 1.2778, 'grad_norm': 0.0022194644060787986, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:09<01:15, 3.78s/it] 96%|█████████▋| 501/520 [31:12<01:11, 3.76s/it] {'loss': 1.2354, 'grad_norm': 0.0021942129217628523, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:12<01:11, 3.76s/it] 97%|█████████▋| 502/520 [31:16<01:07, 3.74s/it] {'loss': 1.1979, 'grad_norm': 0.0018115190456405802, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:16<01:07, 3.74s/it] 97%|█████████▋| 503/520 [31:20<01:03, 3.72s/it] {'loss': 1.2072, 'grad_norm': 0.001975246522021427, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:20<01:03, 3.72s/it] 97%|█████████▋| 504/520 [31:24<00:59, 3.71s/it] {'loss': 1.2009, 'grad_norm': 0.0022612864376014125, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:24<00:59, 3.71s/it] 97%|█████████▋| 505/520 [31:27<00:55, 3.71s/it] {'loss': 1.232, 'grad_norm': 0.0018797159468766148, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:27<00:55, 3.71s/it] 97%|█████████▋| 506/520 [31:31<00:51, 3.70s/it] {'loss': 1.153, 'grad_norm': 0.0020384088298433392, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:31<00:51, 3.70s/it] 98%|█████████▊| 507/520 [31:35<00:48, 3.70s/it] {'loss': 1.3564, 'grad_norm': 0.0018376715757814502, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:35<00:48, 3.70s/it] 98%|█████████▊| 508/520 [31:38<00:44, 3.69s/it] {'loss': 1.2703, 'grad_norm': 0.001926471937057494, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:38<00:44, 3.69s/it] 98%|█████████▊| 509/520 [31:42<00:40, 3.68s/it] {'loss': 1.2362, 'grad_norm': 0.001835901445610021, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:42<00:40, 3.68s/it] 98%|█████████▊| 510/520 [31:46<00:36, 3.69s/it] {'loss': 1.1899, 'grad_norm': 0.0018463594017240482, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:46<00:36, 3.69s/it] 98%|█████████▊| 511/520 [31:49<00:33, 3.70s/it] {'loss': 1.1687, 'grad_norm': 0.001836648949910644, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [31:49<00:33, 3.70s/it] 98%|█████████▊| 512/520 [31:53<00:29, 3.70s/it] {'loss': 1.0444, 'grad_norm': 0.0018584388007448745, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [31:53<00:29, 3.70s/it] 99%|█████████▊| 513/520 [31:57<00:25, 3.70s/it] {'loss': 1.253, 'grad_norm': 0.0022193424007350576, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [31:57<00:25, 3.70s/it] 99%|█████████▉| 514/520 [32:00<00:22, 3.69s/it] {'loss': 1.2196, 'grad_norm': 0.0018221496270594188, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:00<00:22, 3.69s/it] 99%|█████████▉| 515/520 [32:04<00:18, 3.71s/it] {'loss': 1.2661, 'grad_norm': 0.0021441642330599217, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:04<00:18, 3.71s/it] 99%|█████████▉| 516/520 [32:08<00:15, 3.75s/it] {'loss': 1.1606, 
'grad_norm': 0.0018546821635863535, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:08<00:15, 3.75s/it] 99%|█████████▉| 517/520 [32:12<00:11, 3.77s/it] {'loss': 1.2753, 'grad_norm': 0.0020507029488416447, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:12<00:11, 3.77s/it] 100%|█████████▉| 518/520 [32:16<00:07, 3.78s/it] {'loss': 1.1831, 'grad_norm': 0.0020754639257841577, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:16<00:07, 3.78s/it] 100%|█████████▉| 519/520 [32:19<00:03, 3.79s/it] {'loss': 1.2156, 'grad_norm': 0.0018668851384050365, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:20<00:03, 3.79s/it] 100%|██████████| 520/520 [32:24<00:00, 4.04s/it] {'loss': 1.263, 'grad_norm': 0.0018835173379864547, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:24<00:00, 4.04s/it] {'train_runtime': 1944.6423, 'train_samples_per_second': 34.211, 'train_steps_per_second': 0.267, 'train_loss': 1.3259545358327718, 'epoch': 1.0} + 100%|██████████| 520/520 [32:24<00:00, 4.04s/it] 100%|██████████| 520/520 [32:24<00:00, 3.74s/it] +[2025-10-13 13:02:59,410] [INFO] [launch.py:348:main] Process 711970 exits successfully. +[2025-10-13 13:02:59,411] [INFO] [launch.py:348:main] Process 711969 exits successfully. +[2025-10-13 13:03:00,412] [INFO] [launch.py:348:main] Process 711973 exits successfully. +[2025-10-13 13:03:00,413] [INFO] [launch.py:348:main] Process 711974 exits successfully. +[2025-10-13 13:03:00,413] [INFO] [launch.py:348:main] Process 711971 exits successfully. +[2025-10-13 13:03:00,413] [INFO] [launch.py:348:main] Process 711968 exits successfully. +[2025-10-13 13:03:00,414] [INFO] [launch.py:348:main] Process 711972 exits successfully. +[2025-10-13 13:03:03,417] [INFO] [launch.py:348:main] Process 711967 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.1_2e-1_connector-3.0_2.1_2e-1_ablation_20251013_113216.log +Timestamp: 2025-10-13 13:03:05 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation_20251013_130305.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation_20251013_130305.log new file mode 100644 index 0000000000000000000000000000000000000000..4e494abac3e2c67db077bbbe80245f028c0bd6fa --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation_20251013_130305.log @@ -0,0 +1,2254 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation_20251013_130305.log +Timestamp: 2025-10-13 13:03:05 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
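Two quick consistency checks on the run that just completed above. The closing stats line and the learning_rate trail both line up with the hyperparameters visible in the launch command that follows (which appears to differ from the completed run only in its mask temperatures); the 665,298-conversation size of llava_v1_5_mix665k and the warmup rounding are assumptions, everything else is read off the log:

```python
import math

# Assumed from the otherwise-matching deepspeed command below:
# 8 ranks x per_device_train_batch_size 4 x gradient_accumulation_steps 4,
# train_data_ratio 0.1 of the 665,298-sample llava_v1_5_mix665k json.
effective_batch = 8 * 4 * 4                     # 128 samples per optimizer step
num_samples = int(665_298 * 0.1)                # 66529
steps = math.ceil(num_samples / effective_batch)
print(steps)                                    # 520, the logged step count
runtime = 1944.6423
print(round(steps / runtime, 3))                # 0.267  == logged train_steps_per_second
print(round(num_samples / runtime, 3))          # 34.211 == logged train_samples_per_second

# The learning_rate trail matches a cosine schedule with warmup
# (--learning_rate 2e-1, --warmup_ratio 0.03 -> ceil(0.03 * 520) = 16 warmup steps).
warmup = math.ceil(0.03 * steps)

def lr_at(step: int, peak: float = 2e-1) -> float:
    if step < warmup:
        return peak * step / warmup
    progress = (step - warmup) / (steps - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(519))   # ~1.94e-06 vs the logged 1.9427068461808086e-06
print(lr_at(520))   # 0.0, the final logged learning_rate
```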
+ import pynvml # type: ignore[import] +[2025-10-13 13:03:08,691] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 13:03:12,324] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 13:03:12,325] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 2.3 --temperature_mlp_text 2.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 2.3 --temperature_mlp_vision 2.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 2.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-13 13:03:14,904] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 13:03:15,970] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 13:03:15,970] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 13:03:15,970] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 13:03:15,970] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 13:03:15,970] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 13:03:15,970] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 13:03:15,970] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 13:03:15,973] [INFO] [launch.py:253:main] process 801875 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 13:03:15,975] [INFO] 
[launch.py:253:main] processes 801876 through 801882 spawned with the same command as process 801875 above, differing only in '--local_rank=1' through '--local_rank=7'
[... seven duplicate argument lists (timestamps 13:03:15,975 to 13:03:15,988) omitted; see the process 801875 command above ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
[... the same FutureWarning is emitted once by each of the 8 spawned ranks; duplicates omitted ...]
+[2025-10-13 13:03:22,810] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[... repeated once per rank (timestamps 13:03:22,810 to 13:03:22,874); duplicates omitted ...]
+[2025-10-13 13:03:23,490] [INFO] [comm.py:637:init_distributed] cdb=None
[... repeated once per rank; duplicates omitted ...]
+[2025-10-13 13:03:23,490] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
[... printed once per rank; duplicates omitted ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
[... printed once per rank; duplicates omitted ...]
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.3, 'temperature_mlp': 2.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
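The parameter dump above pins down what this run tunes: soft masks at temperature 2.3 over the LLM and the connector, with the base weights frozen. As a rough illustration of what a soft, temperature-scaled weight mask of this kind can look like (a hypothetical sketch using the --init_mean 3.0 from the command line, not the repository's actual masked-linear module):

```python
import torch
import torch.nn as nn

class SoftMaskedLinear(nn.Linear):
    """Hypothetical sketch of a soft weight mask: each weight is scaled by
    sigmoid(score / temperature); the scores are the trainable mask parameters
    while the base weights stay frozen during mask tuning."""

    def __init__(self, in_features, out_features, bias=True,
                 temperature=2.3, init_mean=3.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # init_mean=3.0 gives sigmoid(3.0 / 2.3) ~ 0.79, so masks start mostly "on".
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.weight.requires_grad_(False)  # only the scores receive gradients

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return nn.functional.linear(x, self.weight * mask, self.bias)

layer = SoftMaskedLinear(896, 4864, bias=False)  # shapes as in the Qwen2 MLP config
print(layer(torch.randn(2, 896)).shape)          # torch.Size([2, 4864])
```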
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 2.3,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 2.3,
+    "temperature_mlp": 2.3,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
[... the special-tokens notice and the TypedStorage warning above repeat once per rank; duplicates omitted ...]
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
[... this Flash Attention notice is likewise printed once per rank; the remaining seven copies, interleaved with the NCCL setup below, omitted ...]
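That last notice appears because the checkpoint is materialized on CPU before device placement (which DeepSpeed handles later in this launch, so the message is informational here). What it literally asks for looks like the following illustrative snippet, using only the model name and flags already visible in this log; this is not how the trainer itself loads the model:

```python
import torch
from transformers import AutoModelForCausalLM

# Initialize on CPU, then move to GPU, as the warning's `model.to('cuda')` hint says.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,               # matches the run's --bf16 True
    attn_implementation="flash_attention_2",  # as in the launch command
)
model = model.to("cuda")  # flash-attn kernels require the model on GPU
```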
+ywang29-vrdb-test1-worker-0:801875:801875 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801875:801875 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801875:801875 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:801875:801875 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:801875:801875 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:801875:801875 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:801881:801881 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:801881:801881 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801881:801881 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801881:801881 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:801881:801881 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:801881:801881 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:801880:801880 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:801880:801880 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801880:801880 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801880:801880 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:801880:801880 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:801880:801880 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:801878:801878 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:801878:801878 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801878:801878 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801878:801878 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:801878:801878 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:801878:801878 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:801879:801879 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:801879:801879 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801879:801879 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801879:801879 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:801879:801879 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:801879:801879 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:801877:801877 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:801877:801877 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:801877:801877 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:801877:801877 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:801877:801877 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:801877:801877 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:801876:801876 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:801876:801876 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801876:801876 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801876:801876 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:801876:801876 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:801876:801876 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:801882:801882 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:801882:801882 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801882:801882 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801882:801882 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:801882:801882 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:801882:801882 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO ncclCommInitRank comm 0x5611c9a94ed0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO ncclCommInitRank comm 0x55ba4a86d490 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO ncclCommInitRank comm 0x55a6c5aca810 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO ncclCommInitRank comm 0x55d5dd1b6c00 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO ncclCommInitRank comm 0x55f5f4aefea0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO ncclCommInitRank comm 0x560831b7a540 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO ncclCommInitRank comm 0x5612336c7110 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO ncclCommInitRank comm 0x557ce59c2940 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x77ad6d0b123b7229 - Init START +ywang29-vrdb-test1-worker-0:801875:803506 [0] 
NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO comm 0x557ce59c2940 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO comm 0x55ba4a86d490 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO comm 0x55a6c5aca810 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO comm 0x5611c9a94ed0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO comm 0x560831b7a540 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO comm 0x55f5f4aefea0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO comm 0x55d5dd1b6c00 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO comm 0x5612336c7110 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 
[2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
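The "Channel k/24 : 0 1 2 3 4 5 6 7" lines enumerate 24 identical rings over the eight local ranks, the "Trees" lines give each rank its per-channel tree parent and child, and the P2P/CUMEM entries that follow are the pairwise GPU links realizing both. As an illustration only (not NCCL code), the forward ring neighbour implied by those channel lines is simply (rank + 1) mod nranks:

    # Illustration: each ring forwards to (rank + 1) % nranks, which matches the
    # "r[r] -> (r+1)[(r+1)] via P2P/CUMEM/read" lines in this log.
    nranks = 8
    for rank in range(nranks):
        nxt = (rank + 1) % nranks
        print(f"{rank}[{rank}] -> {nxt}[{nxt}]")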
+ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO Connected all trees 
+ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801881:803507 [6] NCCL INFO ncclCommInitRank comm 0x55f5f4aefea0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801877:803511 [2] NCCL INFO ncclCommInitRank comm 0x55ba4a86d490 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801879:803510 [4] NCCL INFO ncclCommInitRank comm 0x5612336c7110 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801875:803506 [0] NCCL INFO ncclCommInitRank comm 0x560831b7a540 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
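The TUNER/Plugin fallback above is as harmless as the earlier net-plugin one: no external libnccl-tuner.so is installed, so the internal tuner is used. Once each rank's ncclCommInitRank line reports "Init COMPLETE" (the remaining ranks follow just below), collectives are usable; a minimal sanity check one could run at that point, assuming the process group from the sketch earlier, is:

    import torch
    import torch.distributed as dist

    # With 8 ranks each contributing 1.0, the all-reduce should leave 8.0 on
    # every device once the communicator is fully initialized.
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)
    assert x.item() == dist.get_world_size()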
+ywang29-vrdb-test1-worker-0:801880:803508 [5] NCCL INFO ncclCommInitRank comm 0x55d5dd1b6c00 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801878:803509 [3] NCCL INFO ncclCommInitRank comm 0x557ce59c2940 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801882:803513 [7] NCCL INFO ncclCommInitRank comm 0x5611c9a94ed0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x77ad6d0b123b7229 - Init COMPLETE +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:801876:803512 [1] NCCL INFO ncclCommInitRank comm 0x55a6c5aca810 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x77ad6d0b123b7229 - Init COMPLETE +[2025-10-13 13:04:09,461] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 
'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 
'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 13:29:48,837] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
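Both "Some weights ... were not initialized" warnings above are standard Hugging Face Transformers behavior: the pretrain checkpoint stores only the original Qwen2 weights, so every `*.scores` tensor is created fresh and must be learned by the mask-tuning stage. The seemingly shuffled layer order in the warning (layers.1, layers.10, layers.11, ...) is just lexicographic sorting of the key names. A minimal sketch of capturing that list programmatically; `output_loading_info=True` is a real `from_pretrained` option, but the rest (loading this checkpoint through an Auto class that knows the masked Qwen2 variant) is illustrative only:

```python
from transformers import AutoModelForCausalLM

# Illustrative sketch: with a masked Qwen2 variant registered under the Auto
# classes, the loading report would name exactly the "*.scores" tensors that
# the warning above lists as newly initialized.
model, info = AutoModelForCausalLM.from_pretrained(
    "/nfs/ywang29/TinyLLaVA/checkpoints/"
    "tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model",
    output_loading_info=True,
)
print(sorted(info["missing_keys"]))  # e.g. ['model.layers.0.mlp.down_proj.scores', ...]
```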
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + 
(attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 13:30:07,196 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 13:30:07,201 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters 
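The `Mean=3.000000` lines above record how the supermask score tensors are initialized before mask-tuning, and the parameter dump continuing below allocates one trainable score per frozen weight entry. A minimal sketch of what a soft-forward supermask linear layer can look like, assuming the soft mask is `sigmoid(scores / temperature)` with the temperature 1.3 from this run's config; the actual `SupermaskLinearSparsity_SoftForward_Normal` class may differ in detail:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Sketch of a soft-masked linear layer (illustrative, not the repo's code).

    The pretrained weight is frozen; a same-shaped score tensor is trained
    instead, and the forward pass scales each weight by a sigmoid gate.
    """

    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # Matches the "Mean=3.000000" init above: sigmoid(3.0 / 1.3) ~ 0.91,
        # so every gate starts nearly open and training learns what to close.
        self.scores = nn.Parameter(torch.full_like(self.weight, 3.0))
        self.weight.requires_grad = False  # only the scores are trained

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

One score per weight entry reproduces the logged counts: q_proj 896x896 = 802816, k/v_proj 128x896 = 114688, each MLP projection 4864x896 = 4358144. Per decoder layer that is 2x802816 + 2x114688 + 3x4358144 = 14,909,440 scores; over 24 layers, plus the two connector projections (1152x896 = 1,032,192 and 896x896 = 802,816), this sums to the `Total Trainable Parameters: 359661568` reported above.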
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 
4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 
parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 
parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801881:809131 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:801881:809131 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:801875:809126
[0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 
1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:801881:809131 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:801877:809130 [2] NCCL INFO ncclCommInitRank comm 0x7fa8b406af10 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801881:809131 [6] NCCL INFO ncclCommInitRank comm 0x7fa35806ae20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801875:809126 [0] NCCL INFO ncclCommInitRank comm 0x7effd006a890 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801879:809133 [4] NCCL INFO ncclCommInitRank comm 0x7f325c06b3d0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801880:809129 [5] NCCL INFO ncclCommInitRank comm 0x7fdb0806b1a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801876:809132 [1] NCCL INFO ncclCommInitRank comm 0x7fa8e406b120 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801882:809127 [7] NCCL INFO ncclCommInitRank comm 0x7f243406aef0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:801878:809128 [3] NCCL INFO ncclCommInitRank comm 0x7fb83806b0e0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x8b1c2f0a3edd0467 - Init COMPLETE
+ 0%| | 1/520 [00:14<2:01:58, 14.10s/it] {'loss': 3.8836, 'grad_norm': 0.258434383136322, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520
[00:14<2:01:58, 14.10s/it] 0%| | 2/520 [00:17<1:09:20, 8.03s/it] {'loss': 3.5651, 'grad_norm': 0.24008774439600014, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:17<1:09:20, 8.03s/it] 1%| | 3/520 [00:21<52:06, 6.05s/it] {'loss': 2.1356, 'grad_norm': 0.027041935006446898, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<52:06, 6.05s/it] 1%| | 4/520 [00:25<44:15, 5.15s/it] {'loss': 1.9023, 'grad_norm': 0.016309708757145984, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<44:15, 5.15s/it] 1%| | 5/520 [00:29<40:08, 4.68s/it] {'loss': 1.91, 'grad_norm': 0.02119832003703436, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<40:08, 4.68s/it] 1%| | 6/520 [00:33<37:39, 4.40s/it] {'loss': 1.7347, 'grad_norm': 0.01528372877262425, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:33<37:39, 4.40s/it] 1%|▏ | 7/520 [00:36<35:51, 4.19s/it] {'loss': 1.6003, 'grad_norm': 0.013136665379173953, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<35:51, 4.19s/it] 2%|▏ | 8/520 [00:41<36:26, 4.27s/it] {'loss': 1.6209, 'grad_norm': 0.008355282284621712, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:41<36:26, 4.27s/it] 2%|▏ | 9/520 [00:45<36:40, 4.31s/it] {'loss': 1.68, 'grad_norm': 0.007531131296326883, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<36:40, 4.31s/it] 2%|▏ | 10/520 [00:49<35:16, 4.15s/it] {'loss': 1.4872, 'grad_norm': 0.006405800771314058, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<35:16, 4.15s/it] 2%|▏ | 11/520 [00:53<34:40, 4.09s/it] {'loss': 1.5783, 'grad_norm': 0.006524047837644773, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<34:40, 4.09s/it] 2%|▏ | 12/520 [00:57<33:56, 4.01s/it] {'loss': 1.5386, 'grad_norm': 0.005083796070978917, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<33:56, 4.01s/it][2025-10-13 13:31:13,155] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<35:05, 4.15s/it] {'loss': 1.5146, 'grad_norm': 0.00496439882552926, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<35:05, 4.15s/it] 3%|▎ | 14/520 [01:05<34:08, 4.05s/it] {'loss': 1.5553, 'grad_norm': 0.0047621618754709215, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:05<34:08, 4.05s/it] 3%|▎ | 15/520 [01:09<33:30, 3.98s/it] {'loss': 1.5813, 'grad_norm': 0.005570221096789186, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:09<33:30, 3.98s/it] 3%|▎ | 16/520 [01:13<33:00, 3.93s/it] {'loss': 1.5192, 'grad_norm': 0.0044319778321639195, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:13<33:00, 3.93s/it] 3%|▎ | 17/520 [01:16<32:35, 3.89s/it] {'loss': 1.5984, 'grad_norm': 0.0043965196781119795, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:16<32:35, 3.89s/it] 3%|▎ | 18/520 [01:20<32:18, 3.86s/it] {'loss': 1.4499, 'grad_norm': 0.004597390483379298, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:20<32:18, 3.86s/it] 4%|▎ | 19/520 [01:24<32:08, 3.85s/it] {'loss': 1.561, 'grad_norm': 0.004189304402733267, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:24<32:08, 3.85s/it] 4%|▍ | 20/520 [01:28<32:02, 3.84s/it] {'loss': 1.4678, 'grad_norm': 0.004526437056042982, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:28<32:02, 3.84s/it] 4%|▍ | 21/520 [01:32<32:08, 3.86s/it] {'loss': 1.5852, 'grad_norm': 0.005129954433742349, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:32<32:08, 3.86s/it] 4%|▍ | 22/520 [01:36<31:55, 3.85s/it] {'loss': 1.5929, 'grad_norm': 0.003609931788913357, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:36<31:55, 3.85s/it] 4%|▍ | 23/520 [01:39<31:44, 3.83s/it] {'loss': 1.5216, 'grad_norm': 0.004037789140010819, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:39<31:44, 3.83s/it] 5%|▍ | 24/520 [01:43<31:47, 3.84s/it] {'loss': 1.5054, 'grad_norm': 0.003532849230753453, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:43<31:47, 3.84s/it] 5%|▍ | 25/520 [01:47<31:32, 3.82s/it] {'loss': 1.5459, 'grad_norm': 0.00404706576549276, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:47<31:32, 3.82s/it] 5%|▌ | 26/520 [01:51<31:30, 3.83s/it] {'loss': 1.5284, 'grad_norm': 0.0030912427865347177, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:51<31:30, 3.83s/it] 5%|▌ | 27/520 [01:55<31:27, 3.83s/it] {'loss': 1.4387, 'grad_norm': 0.003827924264427086, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:55<31:27, 3.83s/it] 5%|▌ | 28/520 [01:59<31:19, 3.82s/it] {'loss': 1.4228, 'grad_norm': 0.003220652477995093, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:59<31:19, 3.82s/it] 6%|▌ | 29/520 [02:02<31:15, 3.82s/it] {'loss': 1.4438, 'grad_norm': 0.0033361416321940423, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:02<31:15, 3.82s/it] 6%|▌ | 30/520 [02:06<31:08, 3.81s/it] {'loss': 1.6057, 'grad_norm': 0.0037933800584962585, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:06<31:08, 3.81s/it] 6%|▌ | 31/520 [02:10<31:06, 3.82s/it] {'loss': 1.4327, 'grad_norm': 0.00282897982632123, 'learning_rate': 
0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:10<31:06, 3.82s/it] 6%|▌ | 32/520 [02:14<30:54, 3.80s/it] {'loss': 1.5786, 'grad_norm': 0.006665812645321092, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:14<30:54, 3.80s/it] 6%|▋ | 33/520 [02:17<30:25, 3.75s/it] {'loss': 1.446, 'grad_norm': 0.0038132123182578934, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:17<30:25, 3.75s/it] 7%|▋ | 34/520 [02:21<30:08, 3.72s/it] {'loss': 1.4231, 'grad_norm': 0.004393128163058325, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:21<30:08, 3.72s/it] 7%|▋ | 35/520 [02:25<29:52, 3.70s/it] {'loss': 1.4535, 'grad_norm': 0.004685473438839922, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:25<29:52, 3.70s/it] 7%|▋ | 36/520 [02:28<29:37, 3.67s/it] {'loss': 1.5591, 'grad_norm': 0.0030995446412705754, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:28<29:37, 3.67s/it] 7%|▋ | 37/520 [02:32<29:26, 3.66s/it] {'loss': 1.6298, 'grad_norm': 0.005520497176128504, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:32<29:26, 3.66s/it] 7%|▋ | 38/520 [02:35<29:13, 3.64s/it] {'loss': 1.6317, 'grad_norm': 0.004204084835917422, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:35<29:13, 3.64s/it] 8%|▊ | 39/520 [02:39<29:03, 3.63s/it] {'loss': 1.4552, 'grad_norm': 0.0035362098937504263, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:39<29:03, 3.63s/it] 8%|▊ | 40/520 [02:43<28:59, 3.62s/it] {'loss': 1.4962, 'grad_norm': 0.003724684545234214, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:43<28:59, 3.62s/it] 8%|▊ | 41/520 [02:46<28:53, 3.62s/it] {'loss': 1.4608, 'grad_norm': 0.003123082549216068, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:46<28:53, 3.62s/it] 8%|▊ | 42/520 [02:50<28:49, 3.62s/it] {'loss': 1.5029, 'grad_norm': 0.004139580799093141, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:50<28:49, 3.62s/it] 8%|▊ | 43/520 [02:54<28:55, 3.64s/it] {'loss': 1.4806, 'grad_norm': 0.004667047023354636, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:54<28:55, 3.64s/it] 8%|▊ | 44/520 [02:57<28:57, 3.65s/it] {'loss': 1.6439, 'grad_norm': 0.004489153328359815, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:57<28:57, 3.65s/it] 9%|▊ | 45/520 [03:01<28:59, 3.66s/it] {'loss': 1.5036, 'grad_norm': 0.004775007945517418, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [03:01<28:59, 3.66s/it] 9%|▉ | 46/520 [03:05<28:58, 3.67s/it] {'loss': 1.6574, 'grad_norm': 0.003540771889459277, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:05<28:58, 3.67s/it] 9%|▉ | 47/520 [03:08<28:49, 3.66s/it] {'loss': 1.4921, 'grad_norm': 0.0047974948430305015, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:08<28:49, 3.66s/it] 9%|▉ | 48/520 [03:12<28:46, 3.66s/it] {'loss': 1.4622, 'grad_norm': 0.0034797145797113927, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:12<28:46, 3.66s/it] 9%|▉ | 49/520 [03:16<28:41, 3.65s/it] {'loss': 1.4935, 'grad_norm': 0.0032071023879516073, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:16<28:41, 3.65s/it] 10%|▉ | 50/520 [03:19<28:35, 3.65s/it] {'loss': 1.4891, 'grad_norm': 0.003023526558002696, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:19<28:35, 
3.65s/it] 10%|▉ | 51/520 [03:23<28:32, 3.65s/it] {'loss': 1.4123, 'grad_norm': 0.0036559077930115426, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:23<28:32, 3.65s/it] 10%|█ | 52/520 [03:26<28:24, 3.64s/it] {'loss': 1.5487, 'grad_norm': 0.004460464029258322, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:26<28:24, 3.64s/it] 10%|█ | 53/520 [03:30<28:23, 3.65s/it] {'loss': 1.5504, 'grad_norm': 0.0033788667467282126, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:30<28:23, 3.65s/it] 10%|█ | 54/520 [03:34<28:17, 3.64s/it] {'loss': 1.4357, 'grad_norm': 0.003908037950889589, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:34<28:17, 3.64s/it] 11%|█ | 55/520 [03:37<28:13, 3.64s/it] {'loss': 1.4166, 'grad_norm': 0.0035167889048284915, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<28:13, 3.64s/it] 11%|█ | 56/520 [03:41<28:09, 3.64s/it] {'loss': 1.5528, 'grad_norm': 0.0033646616650850297, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:41<28:09, 3.64s/it] 11%|█ | 57/520 [03:45<28:02, 3.63s/it] {'loss': 1.4189, 'grad_norm': 0.005295634602629748, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:45<28:02, 3.63s/it] 11%|█ | 58/520 [03:48<28:02, 3.64s/it] {'loss': 1.5649, 'grad_norm': 0.002647587038490976, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<28:02, 3.64s/it] 11%|█▏ | 59/520 [03:52<28:03, 3.65s/it] {'loss': 1.4634, 'grad_norm': 0.015001211315653577, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<28:03, 3.65s/it] 12%|█▏ | 60/520 [03:56<27:58, 3.65s/it] {'loss': 1.4973, 'grad_norm': 0.0054529756405929065, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:56<27:58, 3.65s/it] 12%|█▏ | 61/520 [03:59<27:53, 3.64s/it] {'loss': 1.6076, 'grad_norm': 0.003474352671555696, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:59<27:53, 3.64s/it] 12%|█▏ | 62/520 [04:03<28:07, 3.68s/it] {'loss': 1.4727, 'grad_norm': 0.003952788828086268, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:07, 3.68s/it] 12%|█▏ | 63/520 [04:07<28:04, 3.69s/it] {'loss': 1.4522, 'grad_norm': 0.004298598355193721, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:07<28:04, 3.69s/it] 12%|█▏ | 64/520 [04:10<27:57, 3.68s/it] {'loss': 1.4952, 'grad_norm': 0.002974610853192383, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:10<27:57, 3.68s/it] 12%|█▎ | 65/520 [04:14<27:46, 3.66s/it] {'loss': 1.5066, 'grad_norm': 0.003705934587050329, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:14<27:46, 3.66s/it] 13%|█▎ | 66/520 [04:18<27:52, 3.68s/it] {'loss': 1.4541, 'grad_norm': 0.004506975501365388, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:18<27:52, 3.68s/it] 13%|█▎ | 67/520 [04:21<27:37, 3.66s/it] {'loss': 1.3378, 'grad_norm': 0.0025710606594294663, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:21<27:37, 3.66s/it] 13%|█▎ | 68/520 [04:25<27:28, 3.65s/it] {'loss': 1.3907, 'grad_norm': 0.002972164857669033, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:25<27:28, 3.65s/it] 13%|█▎ | 69/520 [04:29<27:18, 3.63s/it] {'loss': 1.3724, 'grad_norm': 0.004468487577768677, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:29<27:18, 3.63s/it] 13%|█▎ | 
70/520 [04:32<27:13, 3.63s/it] {'loss': 1.4245, 'grad_norm': 0.003118301173506068, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:32<27:13, 3.63s/it] 14%|█▎ | 71/520 [04:36<27:12, 3.64s/it] {'loss': 1.3503, 'grad_norm': 0.0031822342892683683, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:36<27:12, 3.64s/it] 14%|█▍ | 72/520 [04:39<27:04, 3.63s/it] {'loss': 1.4948, 'grad_norm': 0.0034951125979156587, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:39<27:04, 3.63s/it] 14%|█▍ | 73/520 [04:43<27:03, 3.63s/it] {'loss': 1.3259, 'grad_norm': 0.002652514122096093, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:43<27:03, 3.63s/it] 14%|█▍ | 74/520 [04:47<26:57, 3.63s/it] {'loss': 1.4447, 'grad_norm': 0.0031136810382806682, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:47<26:57, 3.63s/it] 14%|█▍ | 75/520 [04:50<26:56, 3.63s/it] {'loss': 1.3415, 'grad_norm': 0.003027474255918949, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:50<26:56, 3.63s/it] 15%|█▍ | 76/520 [04:54<26:57, 3.64s/it] {'loss': 1.6546, 'grad_norm': 0.004127757119205055, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:54<26:57, 3.64s/it] 15%|█▍ | 77/520 [04:58<26:49, 3.63s/it] {'loss': 1.275, 'grad_norm': 0.0033746922016934535, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:58<26:49, 3.63s/it] 15%|█▌ | 78/520 [05:01<26:42, 3.63s/it] {'loss': 1.3996, 'grad_norm': 0.0030783390201393123, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:01<26:42, 3.63s/it] 15%|█▌ | 79/520 [05:05<26:37, 3.62s/it] {'loss': 1.3842, 'grad_norm': 0.0024579393868312855, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:05<26:37, 3.62s/it] 15%|█▌ | 80/520 [05:08<26:32, 3.62s/it] {'loss': 1.6576, 'grad_norm': 0.00407303785181872, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:08<26:32, 3.62s/it] 16%|█▌ | 81/520 [05:12<26:25, 3.61s/it] {'loss': 1.5406, 'grad_norm': 0.004303884008011059, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:12<26:25, 3.61s/it] 16%|█▌ | 82/520 [05:16<26:23, 3.61s/it] {'loss': 1.4548, 'grad_norm': 0.0026609721107463203, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:16<26:23, 3.61s/it] 16%|█▌ | 83/520 [05:19<26:17, 3.61s/it] {'loss': 1.474, 'grad_norm': 0.002821694093832227, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:19<26:17, 3.61s/it] 16%|█▌ | 84/520 [05:23<26:21, 3.63s/it] {'loss': 1.4876, 'grad_norm': 0.0035988315494364788, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:23<26:21, 3.63s/it] 16%|█▋ | 85/520 [05:27<26:14, 3.62s/it] {'loss': 1.4995, 'grad_norm': 0.0028411149761683606, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:27<26:14, 3.62s/it] 17%|█▋ | 86/520 [05:30<26:22, 3.65s/it] {'loss': 1.5207, 'grad_norm': 0.0034419126868946966, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:30<26:22, 3.65s/it] 17%|█▋ | 87/520 [05:34<26:16, 3.64s/it] {'loss': 1.5721, 'grad_norm': 0.0030911450177654016, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:34<26:16, 3.64s/it] 17%|█▋ | 88/520 [05:38<26:26, 3.67s/it] {'loss': 1.5904, 'grad_norm': 0.0040339510515523605, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:38<26:26, 3.67s/it] 
17%|█▋ | 89/520 [05:41<26:14, 3.65s/it] {'loss': 1.462, 'grad_norm': 0.0029683594105404942, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:41<26:14, 3.65s/it] 17%|█▋ | 90/520 [05:45<26:12, 3.66s/it] {'loss': 1.3965, 'grad_norm': 0.0027869268375177135, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:45<26:12, 3.66s/it] 18%|█▊ | 91/520 [05:49<26:05, 3.65s/it] {'loss': 1.4593, 'grad_norm': 0.0023773436630587516, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:49<26:05, 3.65s/it] 18%|█▊ | 92/520 [05:52<25:58, 3.64s/it] {'loss': 1.4076, 'grad_norm': 0.0027283319519379075, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:52<25:58, 3.64s/it] 18%|█▊ | 93/520 [05:56<25:54, 3.64s/it] {'loss': 1.4102, 'grad_norm': 0.0025699068631392847, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:56<25:54, 3.64s/it] 18%|█▊ | 94/520 [05:59<25:46, 3.63s/it] {'loss': 1.5134, 'grad_norm': 0.0029461464907635515, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [05:59<25:46, 3.63s/it] 18%|█▊ | 95/520 [06:03<25:48, 3.64s/it] {'loss': 1.3941, 'grad_norm': 0.0034525068174505145, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:03<25:48, 3.64s/it] 18%|█▊ | 96/520 [06:07<25:58, 3.68s/it] {'loss': 1.4081, 'grad_norm': 0.002356039561205529, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:07<25:58, 3.68s/it] 19%|█▊ | 97/520 [06:11<25:55, 3.68s/it] {'loss': 1.3732, 'grad_norm': 0.0027612712016020203, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:11<25:55, 3.68s/it] 19%|█▉ | 98/520 [06:14<25:45, 3.66s/it] {'loss': 1.3731, 'grad_norm': 0.0023388246007213665, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:14<25:45, 3.66s/it] 19%|█▉ | 99/520 [06:18<25:34, 3.64s/it] {'loss': 1.3961, 'grad_norm': 0.0028911378261669263, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:18<25:34, 3.64s/it] 19%|█▉ | 100/520 [06:21<25:25, 3.63s/it] {'loss': 1.4566, 'grad_norm': 0.002694580364748155, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:21<25:25, 3.63s/it] 19%|█▉ | 101/520 [06:25<25:17, 3.62s/it] {'loss': 1.3887, 'grad_norm': 0.0027882899027300797, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:25<25:17, 3.62s/it] 20%|█▉ | 102/520 [06:29<25:10, 3.61s/it] {'loss': 1.3925, 'grad_norm': 0.002714543341408547, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:29<25:10, 3.61s/it] 20%|█▉ | 103/520 [06:32<25:07, 3.61s/it] {'loss': 1.3274, 'grad_norm': 0.002441632492441796, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:32<25:07, 3.61s/it] 20%|██ | 104/520 [06:36<25:07, 3.62s/it] {'loss': 1.4001, 'grad_norm': 0.002798894223410839, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:36<25:07, 3.62s/it] 20%|██ | 105/520 [06:39<24:57, 3.61s/it] {'loss': 1.3862, 'grad_norm': 0.0022336435162077233, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:39<24:57, 3.61s/it] 20%|██ | 106/520 [06:43<24:54, 3.61s/it] {'loss': 1.4631, 'grad_norm': 0.0028282403980374876, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:43<24:54, 3.61s/it] 21%|██ | 107/520 [06:47<24:57, 3.63s/it] {'loss': 1.4355, 'grad_norm': 0.003016879493813183, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 
[06:47<24:57, 3.63s/it] 21%|██ | 108/520 [06:50<24:51, 3.62s/it] {'loss': 1.3434, 'grad_norm': 0.0025863356674889156, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:50<24:51, 3.62s/it] 21%|██ | 109/520 [06:54<24:50, 3.63s/it] {'loss': 1.4392, 'grad_norm': 0.0025696772739001, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:54<24:50, 3.63s/it] 21%|██ | 110/520 [06:58<24:46, 3.63s/it] {'loss': 1.5469, 'grad_norm': 0.0025755721428864205, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [06:58<24:46, 3.63s/it] 21%|██▏ | 111/520 [07:01<24:39, 3.62s/it] {'loss': 1.5565, 'grad_norm': 0.0026842675306580233, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:01<24:39, 3.62s/it] 22%|██▏ | 112/520 [07:05<24:35, 3.62s/it] {'loss': 1.4294, 'grad_norm': 0.0025193758326733716, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:05<24:35, 3.62s/it] 22%|██▏ | 113/520 [07:08<24:32, 3.62s/it] {'loss': 1.2905, 'grad_norm': 0.0021420252358545972, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:08<24:32, 3.62s/it] 22%|██▏ | 114/520 [07:12<24:28, 3.62s/it] {'loss': 1.3947, 'grad_norm': 0.0023296494398799272, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:12<24:28, 3.62s/it] 22%|██▏ | 115/520 [07:16<24:23, 3.61s/it] {'loss': 1.5206, 'grad_norm': 0.0022745665707617946, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:16<24:23, 3.61s/it] 22%|██▏ | 116/520 [07:19<24:20, 3.62s/it] {'loss': 1.5104, 'grad_norm': 0.0021013417940849793, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:19<24:20, 3.62s/it] 22%|██▎ | 117/520 [07:23<24:19, 3.62s/it] {'loss': 1.4941, 'grad_norm': 0.002507158571415479, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:23<24:19, 3.62s/it] 23%|██▎ | 118/520 [07:27<24:32, 3.66s/it] {'loss': 1.3711, 'grad_norm': 0.002225262657762834, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:27<24:32, 3.66s/it] 23%|██▎ | 119/520 [07:30<24:37, 3.68s/it] {'loss': 1.3276, 'grad_norm': 0.002247313055332239, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:30<24:37, 3.68s/it] 23%|██▎ | 120/520 [07:34<24:49, 3.72s/it] {'loss': 1.3493, 'grad_norm': 0.002816921779783115, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:34<24:49, 3.72s/it] 23%|██▎ | 121/520 [07:38<24:44, 3.72s/it] {'loss': 1.4133, 'grad_norm': 0.0025908985424928943, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:38<24:44, 3.72s/it] 23%|██▎ | 122/520 [07:42<25:00, 3.77s/it] {'loss': 1.2985, 'grad_norm': 0.002319332398986015, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:42<25:00, 3.77s/it] 24%|██▎ | 123/520 [07:46<25:04, 3.79s/it] {'loss': 1.5006, 'grad_norm': 0.0025147962097512907, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:46<25:04, 3.79s/it] 24%|██▍ | 124/520 [07:49<25:12, 3.82s/it] {'loss': 1.3937, 'grad_norm': 0.0026616923824739343, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:49<25:12, 3.82s/it] 24%|██▍ | 125/520 [07:53<25:12, 3.83s/it] {'loss': 1.3742, 'grad_norm': 0.0023704858740013876, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:53<25:12, 3.83s/it] 24%|██▍ | 126/520 [07:58<26:29, 4.03s/it] {'loss': 1.4107, 'grad_norm': 0.002093848295570042, 
'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:58<26:29, 4.03s/it] 24%|██▍ | 127/520 [08:01<25:34, 3.91s/it] {'loss': 1.352, 'grad_norm': 0.0029697584849144422, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:01<25:34, 3.91s/it] 25%|██▍ | 128/520 [08:05<24:59, 3.83s/it] {'loss': 1.408, 'grad_norm': 0.0024606577914091598, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:05<24:59, 3.83s/it] 25%|██▍ | 129/520 [08:09<24:30, 3.76s/it] {'loss': 1.328, 'grad_norm': 0.0020855231544734554, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:09<24:30, 3.76s/it] 25%|██▌ | 130/520 [08:12<24:07, 3.71s/it] {'loss': 1.3886, 'grad_norm': 0.0021602612443994656, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:12<24:07, 3.71s/it] 25%|██▌ | 131/520 [08:16<23:53, 3.68s/it] {'loss': 1.3841, 'grad_norm': 0.0026807695686793076, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:16<23:53, 3.68s/it] 25%|██▌ | 132/520 [08:20<23:37, 3.65s/it] {'loss': 1.4354, 'grad_norm': 0.0023946089256810547, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:20<23:37, 3.65s/it] 26%|██▌ | 133/520 [08:23<23:30, 3.64s/it] {'loss': 1.3398, 'grad_norm': 0.0023424960658750232, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:23<23:30, 3.64s/it] 26%|██▌ | 134/520 [08:27<23:21, 3.63s/it] {'loss': 1.4263, 'grad_norm': 0.002798083499730072, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:27<23:21, 3.63s/it] 26%|██▌ | 135/520 [08:30<23:18, 3.63s/it] {'loss': 1.4999, 'grad_norm': 0.0023691826873602872, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:30<23:18, 3.63s/it] 26%|██▌ | 136/520 [08:34<23:17, 3.64s/it] {'loss': 1.4181, 'grad_norm': 0.002457996253380213, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:34<23:17, 3.64s/it] 26%|██▋ | 137/520 [08:38<23:11, 3.63s/it] {'loss': 1.3425, 'grad_norm': 0.0026104356774988694, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:38<23:11, 3.63s/it] 27%|██▋ | 138/520 [08:41<23:29, 3.69s/it] {'loss': 1.3435, 'grad_norm': 0.0021417713368518786, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:41<23:29, 3.69s/it] 27%|██▋ | 139/520 [08:45<23:42, 3.73s/it] {'loss': 1.287, 'grad_norm': 0.002628014272666977, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:45<23:42, 3.73s/it] 27%|██▋ | 140/520 [08:49<23:53, 3.77s/it] {'loss': 1.4282, 'grad_norm': 0.0022546921695067584, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:49<23:53, 3.77s/it] 27%|██▋ | 141/520 [08:53<23:59, 3.80s/it] {'loss': 1.4621, 'grad_norm': 0.0022876551017592794, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:53<23:59, 3.80s/it] 27%|██▋ | 142/520 [08:57<24:00, 3.81s/it] {'loss': 1.4726, 'grad_norm': 0.002201565737677759, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:57<24:00, 3.81s/it] 28%|██▊ | 143/520 [09:01<24:04, 3.83s/it] {'loss': 1.3818, 'grad_norm': 0.0024292569918090714, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:01<24:04, 3.83s/it] 28%|██▊ | 144/520 [09:05<24:03, 3.84s/it] {'loss': 1.3323, 'grad_norm': 0.0022822648866674235, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:05<24:03, 3.84s/it] 28%|██▊ | 
145/520 [09:08<23:59, 3.84s/it] {'loss': 1.2669, 'grad_norm': 0.002030742095923602, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:08<23:59, 3.84s/it] 28%|██▊ | 146/520 [09:12<23:57, 3.84s/it] {'loss': 1.5103, 'grad_norm': 0.0022672671965902947, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:12<23:57, 3.84s/it] 28%|██▊ | 147/520 [09:16<23:59, 3.86s/it] {'loss': 1.3079, 'grad_norm': 0.0021606234109137214, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:16<23:59, 3.86s/it] 28%|██▊ | 148/520 [09:20<23:56, 3.86s/it] {'loss': 1.3491, 'grad_norm': 0.0022409616913365853, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:20<23:56, 3.86s/it] 29%|██▊ | 149/520 [09:24<23:57, 3.87s/it] {'loss': 1.297, 'grad_norm': 0.00235207865709141, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:24<23:57, 3.87s/it] 29%|██▉ | 150/520 [09:28<23:50, 3.87s/it] {'loss': 1.5263, 'grad_norm': 0.0024158583861932244, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:28<23:50, 3.87s/it] 29%|██▉ | 151/520 [09:32<23:46, 3.87s/it] {'loss': 1.335, 'grad_norm': 0.0022119415583964665, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:32<23:46, 3.87s/it] 29%|██▉ | 152/520 [09:35<23:20, 3.81s/it] {'loss': 1.3039, 'grad_norm': 0.002293031180914037, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:35<23:20, 3.81s/it] 29%|██▉ | 153/520 [09:39<22:57, 3.75s/it] {'loss': 1.3342, 'grad_norm': 0.002292608652273999, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:39<22:57, 3.75s/it] 30%|██▉ | 154/520 [09:43<22:41, 3.72s/it] {'loss': 1.4272, 'grad_norm': 0.0021552014466986414, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:43<22:41, 3.72s/it] 30%|██▉ | 155/520 [09:46<22:35, 3.71s/it] {'loss': 1.3378, 'grad_norm': 0.002298236730560488, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:46<22:35, 3.71s/it] 30%|███ | 156/520 [09:50<22:22, 3.69s/it] {'loss': 1.3619, 'grad_norm': 0.002476301893341126, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:50<22:22, 3.69s/it] 30%|███ | 157/520 [09:54<22:19, 3.69s/it] {'loss': 1.4978, 'grad_norm': 0.0024320497579264503, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:54<22:19, 3.69s/it] 30%|███ | 158/520 [09:58<22:38, 3.75s/it] {'loss': 1.3401, 'grad_norm': 0.00261017631677333, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:58<22:38, 3.75s/it] 31%|███ | 159/520 [10:02<23:13, 3.86s/it] {'loss': 1.3716, 'grad_norm': 0.002146273737136317, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:02<23:13, 3.86s/it] 31%|███ | 160/520 [10:06<23:41, 3.95s/it] {'loss': 1.3928, 'grad_norm': 0.0022915442956827612, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:06<23:41, 3.95s/it] 31%|███ | 161/520 [10:10<23:28, 3.92s/it] {'loss': 1.3772, 'grad_norm': 0.0023608635858948803, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:10<23:28, 3.92s/it] 31%|███ | 162/520 [10:14<23:16, 3.90s/it] {'loss': 1.4359, 'grad_norm': 0.00253320503606558, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:14<23:16, 3.90s/it] 31%|███▏ | 163/520 [10:17<22:51, 3.84s/it] {'loss': 1.2571, 'grad_norm': 0.0030118156168122447, 'learning_rate': 
0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:17<22:51, 3.84s/it] 32%|███▏ | 164/520 [10:21<22:28, 3.79s/it] {'loss': 1.2241, 'grad_norm': 0.0022227334902755985, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:21<22:28, 3.79s/it] 32%|███▏ | 165/520 [10:25<22:08, 3.74s/it] {'loss': 1.3663, 'grad_norm': 0.0020791781004029014, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:25<22:08, 3.74s/it] 32%|███▏ | 166/520 [10:28<21:52, 3.71s/it] {'loss': 1.3622, 'grad_norm': 0.0024765952093872617, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:28<21:52, 3.71s/it] 32%|███▏ | 167/520 [10:32<21:43, 3.69s/it] {'loss': 1.3503, 'grad_norm': 0.002583792531360595, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:32<21:43, 3.69s/it] 32%|███▏ | 168/520 [10:35<21:33, 3.67s/it] {'loss': 1.2748, 'grad_norm': 0.002156699105800558, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:35<21:33, 3.67s/it] 32%|███▎ | 169/520 [10:39<21:26, 3.67s/it] {'loss': 1.3684, 'grad_norm': 0.002117246626630299, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:39<21:26, 3.67s/it] 33%|███▎ | 170/520 [10:43<21:20, 3.66s/it] {'loss': 1.369, 'grad_norm': 0.002594805417450962, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:43<21:20, 3.66s/it] 33%|███▎ | 171/520 [10:46<21:12, 3.65s/it] {'loss': 1.296, 'grad_norm': 0.0024674190990379885, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:46<21:12, 3.65s/it] 33%|███▎ | 172/520 [10:50<21:05, 3.64s/it] {'loss': 1.3593, 'grad_norm': 0.002122107908637039, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:50<21:05, 3.64s/it] 33%|███▎ | 173/520 [10:54<20:59, 3.63s/it] {'loss': 1.2989, 'grad_norm': 0.002162012046133847, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:54<20:59, 3.63s/it] 33%|███▎ | 174/520 [10:57<21:03, 3.65s/it] {'loss': 1.3747, 'grad_norm': 0.00252446657701512, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [10:57<21:03, 3.65s/it] 34%|███▎ | 175/520 [11:01<20:59, 3.65s/it] {'loss': 1.2651, 'grad_norm': 0.0021146909949554785, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:01<20:59, 3.65s/it] 34%|███▍ | 176/520 [11:05<21:00, 3.66s/it] {'loss': 1.4445, 'grad_norm': 0.0022016805957087056, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:05<21:00, 3.66s/it] 34%|███▍ | 177/520 [11:08<20:53, 3.65s/it] {'loss': 1.3159, 'grad_norm': 0.0025140541923947927, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:08<20:53, 3.65s/it] 34%|███▍ | 178/520 [11:12<20:48, 3.65s/it] {'loss': 1.3422, 'grad_norm': 0.002394276214286673, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:12<20:48, 3.65s/it] 34%|███▍ | 179/520 [11:16<20:41, 3.64s/it] {'loss': 1.425, 'grad_norm': 0.002051032145589871, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:16<20:41, 3.64s/it] 35%|███▍ | 180/520 [11:19<20:35, 3.63s/it] {'loss': 1.3362, 'grad_norm': 0.0023335846362225556, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:19<20:35, 3.63s/it] 35%|███▍ | 181/520 [11:23<20:33, 3.64s/it] {'loss': 1.3108, 'grad_norm': 0.0020877470228260996, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:23<20:33, 3.64s/it] 
35%|███▌ | 182/520 [11:26<20:27, 3.63s/it] {'loss': 1.3273, 'grad_norm': 0.002248628500388908, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:26<20:27, 3.63s/it] 35%|███▌ | 183/520 [11:30<20:25, 3.64s/it] {'loss': 1.3612, 'grad_norm': 0.0023104546250821544, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:30<20:25, 3.64s/it] 35%|███▌ | 184/520 [11:34<20:22, 3.64s/it] {'loss': 1.2592, 'grad_norm': 0.0022024818160366368, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:34<20:22, 3.64s/it] 36%|███▌ | 185/520 [11:37<20:26, 3.66s/it] {'loss': 1.447, 'grad_norm': 0.002236481340398751, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:37<20:26, 3.66s/it] 36%|███▌ | 186/520 [11:41<20:33, 3.69s/it] {'loss': 1.2936, 'grad_norm': 0.0021359965860200253, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:41<20:33, 3.69s/it] 36%|███▌ | 187/520 [11:45<20:39, 3.72s/it] {'loss': 1.3027, 'grad_norm': 0.0025858201053133587, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:45<20:39, 3.72s/it] 36%|███▌ | 188/520 [11:49<20:25, 3.69s/it] {'loss': 1.3817, 'grad_norm': 0.002405570848650889, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:49<20:25, 3.69s/it] 36%|███▋ | 189/520 [11:52<20:26, 3.71s/it] {'loss': 1.3909, 'grad_norm': 0.0019977648657594107, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:52<20:26, 3.71s/it] 37%|███▋ | 190/520 [11:56<20:42, 3.76s/it] {'loss': 1.3044, 'grad_norm': 0.002362337513591826, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:56<20:42, 3.76s/it] 37%|███▋ | 191/520 [12:00<20:44, 3.78s/it] {'loss': 1.2578, 'grad_norm': 0.0020544335874398824, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:00<20:44, 3.78s/it] 37%|███▋ | 192/520 [12:04<20:30, 3.75s/it] {'loss': 1.3511, 'grad_norm': 0.0022317512338012394, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:04<20:30, 3.75s/it] 37%|███▋ | 193/520 [12:07<20:16, 3.72s/it] {'loss': 1.3778, 'grad_norm': 0.0024492804246703075, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:07<20:16, 3.72s/it] 37%|███▋ | 194/520 [12:11<20:07, 3.70s/it] {'loss': 1.2519, 'grad_norm': 0.002304140122792827, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:11<20:07, 3.70s/it] 38%|███▊ | 195/520 [12:15<19:59, 3.69s/it] {'loss': 1.3659, 'grad_norm': 0.002231340830062403, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:15<19:59, 3.69s/it] 38%|███▊ | 196/520 [12:18<19:49, 3.67s/it] {'loss': 1.3328, 'grad_norm': 0.0023493442845201333, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:18<19:49, 3.67s/it] 38%|███▊ | 197/520 [12:22<19:52, 3.69s/it] {'loss': 1.2953, 'grad_norm': 0.0020342879092579906, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:22<19:52, 3.69s/it] 38%|███▊ | 198/520 [12:26<20:01, 3.73s/it] {'loss': 1.3669, 'grad_norm': 0.002221760686302141, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:26<20:01, 3.73s/it] 38%|███▊ | 199/520 [12:30<20:09, 3.77s/it] {'loss': 1.2798, 'grad_norm': 0.002256481517820118, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:30<20:09, 3.77s/it] 38%|███▊ | 200/520 [12:34<20:11, 3.79s/it] {'loss': 1.3021, 'grad_norm': 
0.002272803501973503, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:34<20:11, 3.79s/it] 39%|███▊ | 201/520 [12:37<20:19, 3.82s/it] {'loss': 1.3202, 'grad_norm': 0.0019226554105209231, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:37<20:19, 3.82s/it] 39%|███▉ | 202/520 [12:41<20:18, 3.83s/it] {'loss': 1.2754, 'grad_norm': 0.002156168013337285, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:41<20:18, 3.83s/it] 39%|███▉ | 203/520 [12:45<20:17, 3.84s/it] {'loss': 1.3358, 'grad_norm': 0.0022688250005414505, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:45<20:17, 3.84s/it] 39%|███▉ | 204/520 [12:49<20:11, 3.83s/it] {'loss': 1.3598, 'grad_norm': 0.0023879161472683597, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:49<20:11, 3.83s/it] 39%|███▉ | 205/520 [12:53<20:12, 3.85s/it] {'loss': 1.3338, 'grad_norm': 0.002168881878085592, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:53<20:12, 3.85s/it] 40%|███▉ | 206/520 [12:57<20:07, 3.85s/it] {'loss': 1.3955, 'grad_norm': 0.0021232706001640808, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:57<20:07, 3.85s/it] 40%|███▉ | 207/520 [13:01<20:06, 3.85s/it] {'loss': 1.316, 'grad_norm': 0.001983480668940166, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:01<20:06, 3.85s/it] 40%|████ | 208/520 [13:04<20:02, 3.85s/it] {'loss': 1.3696, 'grad_norm': 0.0025151542408205934, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:04<20:02, 3.85s/it] 40%|████ | 209/520 [13:08<19:59, 3.86s/it] {'loss': 1.2802, 'grad_norm': 0.001985055935503461, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:08<19:59, 3.86s/it] 40%|████ | 210/520 [13:12<19:58, 3.87s/it] {'loss': 1.3658, 'grad_norm': 0.0022241972231247633, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:12<19:58, 3.87s/it] 41%|████ | 211/520 [13:16<19:53, 3.86s/it] {'loss': 1.3746, 'grad_norm': 0.002003700264550589, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:16<19:53, 3.86s/it] 41%|████ | 212/520 [13:20<19:48, 3.86s/it] {'loss': 1.3383, 'grad_norm': 0.002004953717515684, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:20<19:48, 3.86s/it] 41%|████ | 213/520 [13:24<19:43, 3.86s/it] {'loss': 1.3059, 'grad_norm': 0.002498002974658384, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:24<19:43, 3.86s/it] 41%|████ | 214/520 [13:28<19:38, 3.85s/it] {'loss': 1.2981, 'grad_norm': 0.0022724516377435108, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:28<19:38, 3.85s/it] 41%|████▏ | 215/520 [13:31<19:34, 3.85s/it] {'loss': 1.2549, 'grad_norm': 0.002132878928407487, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:31<19:34, 3.85s/it] 42%|████▏ | 216/520 [13:35<19:28, 3.85s/it] {'loss': 1.2082, 'grad_norm': 0.0020708808167557, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:35<19:28, 3.85s/it] 42%|████▏ | 217/520 [13:39<19:25, 3.85s/it] {'loss': 1.3334, 'grad_norm': 0.0021688713445126404, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:39<19:25, 3.85s/it] 42%|████▏ | 218/520 [13:43<19:23, 3.85s/it] {'loss': 1.3335, 'grad_norm': 0.002296429429528604, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 
42%|████▏ | 218/520 [13:43<19:23, 3.85s/it] 42%|████▏ | 219/520 [13:47<19:30, 3.89s/it] {'loss': 1.3093, 'grad_norm': 0.002025740261865774, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:47<19:30, 3.89s/it] 42%|████▏ | 220/520 [13:51<19:44, 3.95s/it] {'loss': 1.3016, 'grad_norm': 0.0021849073926824172, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:51<19:44, 3.95s/it] 42%|████▎ | 221/520 [13:55<19:54, 4.00s/it] {'loss': 1.3357, 'grad_norm': 0.002114431344086034, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:55<19:54, 4.00s/it] 43%|████▎ | 222/520 [13:59<19:58, 4.02s/it] {'loss': 1.2446, 'grad_norm': 0.00210512145840308, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:59<19:58, 4.02s/it] 43%|████▎ | 223/520 [14:03<20:00, 4.04s/it] {'loss': 1.24, 'grad_norm': 0.001924147870581999, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:03<20:00, 4.04s/it] 43%|████▎ | 224/520 [14:07<19:57, 4.05s/it] {'loss': 1.4373, 'grad_norm': 0.0030117633307785436, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:07<19:57, 4.05s/it] 43%|████▎ | 225/520 [14:11<19:58, 4.06s/it] {'loss': 1.2612, 'grad_norm': 0.002162135009346823, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:11<19:58, 4.06s/it] 43%|████▎ | 226/520 [14:16<19:53, 4.06s/it] {'loss': 1.362, 'grad_norm': 0.0020008753763182904, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:16<19:53, 4.06s/it] 44%|████▎ | 227/520 [14:19<19:24, 3.98s/it] {'loss': 1.3495, 'grad_norm': 0.001956366517067766, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:19<19:24, 3.98s/it] 44%|████▍ | 228/520 [14:23<19:05, 3.92s/it] {'loss': 1.4313, 'grad_norm': 0.0022574740064234336, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:23<19:05, 3.92s/it] 44%|████▍ | 229/520 [14:27<18:51, 3.89s/it] {'loss': 1.3175, 'grad_norm': 0.0018538415468436758, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:27<18:51, 3.89s/it] 44%|████▍ | 230/520 [14:31<18:40, 3.86s/it] {'loss': 1.1989, 'grad_norm': 0.002087667971379181, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:31<18:40, 3.86s/it] 44%|████▍ | 231/520 [14:35<18:32, 3.85s/it] {'loss': 1.2654, 'grad_norm': 0.0018733038519508814, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:35<18:32, 3.85s/it] 45%|████▍ | 232/520 [14:38<18:23, 3.83s/it] {'loss': 1.4607, 'grad_norm': 0.002361559786097295, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:38<18:23, 3.83s/it] 45%|████▍ | 233/520 [14:42<18:19, 3.83s/it] {'loss': 1.3498, 'grad_norm': 0.0023700001249628297, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:42<18:19, 3.83s/it] 45%|████▌ | 234/520 [14:46<18:14, 3.83s/it] {'loss': 1.2086, 'grad_norm': 0.0020999671817652126, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:46<18:14, 3.83s/it] 45%|████▌ | 235/520 [14:50<18:07, 3.82s/it] {'loss': 1.2669, 'grad_norm': 0.0021935642926438734, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:50<18:07, 3.82s/it] 45%|████▌ | 236/520 [14:54<18:06, 3.83s/it] {'loss': 1.3746, 'grad_norm': 0.0019798797445729215, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:54<18:06, 
3.83s/it] 46%|████▌ | 237/520 [14:57<18:01, 3.82s/it] {'loss': 1.342, 'grad_norm': 0.0019972828984344145, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:57<18:01, 3.82s/it] 46%|████▌ | 238/520 [15:01<17:55, 3.82s/it] {'loss': 1.2825, 'grad_norm': 0.0021539254525655784, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:01<17:55, 3.82s/it] 46%|████▌ | 239/520 [15:05<17:52, 3.82s/it] {'loss': 1.3783, 'grad_norm': 0.0022083707517883957, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:05<17:52, 3.82s/it] 46%|████▌ | 240/520 [15:09<17:47, 3.81s/it] {'loss': 1.1526, 'grad_norm': 0.0020225193327189092, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:09<17:47, 3.81s/it] 46%|████▋ | 241/520 [15:13<17:43, 3.81s/it] {'loss': 1.2503, 'grad_norm': 0.0020126895576499026, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:13<17:43, 3.81s/it] 47%|████▋ | 242/520 [15:16<17:37, 3.80s/it] {'loss': 1.2647, 'grad_norm': 0.0018912744854509811, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:16<17:37, 3.80s/it] 47%|████▋ | 243/520 [15:20<17:35, 3.81s/it] {'loss': 1.2467, 'grad_norm': 0.0020607565063946536, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:20<17:35, 3.81s/it] 47%|████▋ | 244/520 [15:24<17:30, 3.81s/it] {'loss': 1.3815, 'grad_norm': 0.0020962540706933965, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:24<17:30, 3.81s/it] 47%|████▋ | 245/520 [15:28<17:25, 3.80s/it] {'loss': 1.2377, 'grad_norm': 0.0021105280118926296, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:28<17:25, 3.80s/it] 47%|████▋ | 246/520 [15:32<17:21, 3.80s/it] {'loss': 1.4388, 'grad_norm': 0.0023466204136991923, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:32<17:21, 3.80s/it] 48%|████▊ | 247/520 [15:35<17:16, 3.80s/it] {'loss': 1.424, 'grad_norm': 0.0021294423486797253, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:35<17:16, 3.80s/it] 48%|████▊ | 248/520 [15:39<17:11, 3.79s/it] {'loss': 1.243, 'grad_norm': 0.0021174323227935014, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:39<17:11, 3.79s/it] 48%|████▊ | 249/520 [15:43<17:08, 3.79s/it] {'loss': 1.3473, 'grad_norm': 0.0020424485738424585, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:43<17:08, 3.79s/it] 48%|████▊ | 250/520 [15:47<17:05, 3.80s/it] {'loss': 1.2773, 'grad_norm': 0.002164404577730207, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:47<17:05, 3.80s/it] 48%|████▊ | 251/520 [15:51<17:01, 3.80s/it] {'loss': 1.3454, 'grad_norm': 0.001862702318331724, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:51<17:01, 3.80s/it] 48%|████▊ | 252/520 [15:54<16:58, 3.80s/it] {'loss': 1.3247, 'grad_norm': 0.0020367269736493213, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:54<16:58, 3.80s/it] 49%|████▊ | 253/520 [15:58<16:43, 3.76s/it] {'loss': 1.3424, 'grad_norm': 0.002250633058332776, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:58<16:43, 3.76s/it] 49%|████▉ | 254/520 [16:02<16:29, 3.72s/it] {'loss': 1.2627, 'grad_norm': 0.0019206281699656656, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:02<16:29, 3.72s/it] 49%|████▉ | 255/520 
[16:05<16:18, 3.69s/it] {'loss': 1.2686, 'grad_norm': 0.0022302712589648537, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:05<16:18, 3.69s/it] 49%|████▉ | 256/520 [16:09<16:08, 3.67s/it] {'loss': 1.3227, 'grad_norm': 0.002136476763017866, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:09<16:08, 3.67s/it] 49%|████▉ | 257/520 [16:13<16:02, 3.66s/it] {'loss': 1.309, 'grad_norm': 0.0020795712506721316, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:13<16:02, 3.66s/it] 50%|████▉ | 258/520 [16:16<15:59, 3.66s/it] {'loss': 1.3188, 'grad_norm': 0.0017982012569319969, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:16<15:59, 3.66s/it] 50%|████▉ | 259/520 [16:20<15:51, 3.64s/it] {'loss': 1.3834, 'grad_norm': 0.0023131916058735766, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:20<15:51, 3.64s/it] 50%|█████ | 260/520 [16:24<15:52, 3.66s/it] {'loss': 1.3874, 'grad_norm': 0.0024297153645183537, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:24<15:52, 3.66s/it] 50%|█████ | 261/520 [16:27<15:45, 3.65s/it] {'loss': 1.3256, 'grad_norm': 0.002140585324690171, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:27<15:45, 3.65s/it] 50%|█████ | 262/520 [16:31<15:39, 3.64s/it] {'loss': 1.229, 'grad_norm': 0.002136696328313153, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:31<15:39, 3.64s/it] 51%|█████ | 263/520 [16:34<15:34, 3.63s/it] {'loss': 1.3371, 'grad_norm': 0.0033252155771758, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:34<15:34, 3.63s/it] 51%|█████ | 264/520 [16:38<15:31, 3.64s/it] {'loss': 1.3508, 'grad_norm': 0.002016848053767266, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:38<15:31, 3.64s/it] 51%|█████ | 265/520 [16:42<15:26, 3.63s/it] {'loss': 1.2515, 'grad_norm': 0.0022676701414305047, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:42<15:26, 3.63s/it] 51%|█████ | 266/520 [16:45<15:26, 3.65s/it] {'loss': 1.104, 'grad_norm': 0.001834158739131681, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:45<15:26, 3.65s/it] 51%|█████▏ | 267/520 [16:49<15:22, 3.65s/it] {'loss': 1.2455, 'grad_norm': 0.0019525414477934426, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:49<15:22, 3.65s/it] 52%|█████▏ | 268/520 [16:53<15:17, 3.64s/it] {'loss': 1.4655, 'grad_norm': 0.0031341524454328454, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:53<15:17, 3.64s/it] 52%|█████▏ | 269/520 [16:56<15:13, 3.64s/it] {'loss': 1.3565, 'grad_norm': 0.0021065571285413544, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:56<15:13, 3.64s/it] 52%|█████▏ | 270/520 [17:00<15:20, 3.68s/it] {'loss': 1.2724, 'grad_norm': 0.0023139389360148483, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:00<15:20, 3.68s/it] 52%|█████▏ | 271/520 [17:04<15:28, 3.73s/it] {'loss': 1.3498, 'grad_norm': 0.002165449039876435, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:04<15:28, 3.73s/it] 52%|█████▏ | 272/520 [17:08<15:33, 3.76s/it] {'loss': 1.2789, 'grad_norm': 0.002198834009732616, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:08<15:33, 3.76s/it] 52%|█████▎ | 273/520 [17:12<15:35, 3.79s/it] {'loss': 1.4039, 
'grad_norm': 0.0021626947666895414, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:12<15:35, 3.79s/it] 53%|█████▎ | 274/520 [17:15<15:31, 3.79s/it] {'loss': 1.2999, 'grad_norm': 0.0021404523562000375, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:15<15:31, 3.79s/it] 53%|█████▎ | 275/520 [17:19<15:16, 3.74s/it] {'loss': 1.2487, 'grad_norm': 0.0022915301435744453, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:19<15:16, 3.74s/it] 53%|█████▎ | 276/520 [17:23<15:05, 3.71s/it] {'loss': 1.3266, 'grad_norm': 0.002190124819499194, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:23<15:05, 3.71s/it] 53%|█████▎ | 277/520 [17:26<14:56, 3.69s/it] {'loss': 1.3992, 'grad_norm': 0.0021560960979895277, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:26<14:56, 3.69s/it] 53%|█████▎ | 278/520 [17:30<14:47, 3.67s/it] {'loss': 1.1935, 'grad_norm': 0.0019151784177066325, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:30<14:47, 3.67s/it] 54%|█████▎ | 279/520 [17:34<14:40, 3.65s/it] {'loss': 1.2937, 'grad_norm': 0.002391314415956418, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:34<14:40, 3.65s/it] 54%|█████▍ | 280/520 [17:37<14:37, 3.66s/it] {'loss': 1.2505, 'grad_norm': 0.002266972139003875, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:37<14:37, 3.66s/it] 54%|█████▍ | 281/520 [17:41<14:35, 3.66s/it] {'loss': 1.3598, 'grad_norm': 0.0021819008822330736, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:41<14:35, 3.66s/it] 54%|█████▍ | 282/520 [17:45<14:32, 3.66s/it] {'loss': 1.2139, 'grad_norm': 0.0019021223583083435, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:45<14:32, 3.66s/it] 54%|█████▍ | 283/520 [17:48<14:27, 3.66s/it] {'loss': 1.378, 'grad_norm': 0.0022660811837058484, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:48<14:27, 3.66s/it] 55%|█████▍ | 284/520 [17:52<14:23, 3.66s/it] {'loss': 1.2631, 'grad_norm': 0.002241779517667831, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:52<14:23, 3.66s/it] 55%|█████▍ | 285/520 [17:56<14:22, 3.67s/it] {'loss': 1.2424, 'grad_norm': 0.0020596822104063687, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:56<14:22, 3.67s/it] 55%|█████▌ | 286/520 [17:59<14:16, 3.66s/it] {'loss': 1.1089, 'grad_norm': 0.002113623651965732, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:59<14:16, 3.66s/it] 55%|█████▌ | 287/520 [18:03<14:14, 3.67s/it] {'loss': 1.3533, 'grad_norm': 0.0021867663948629563, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:03<14:14, 3.67s/it] 55%|█████▌ | 288/520 [18:07<14:09, 3.66s/it] {'loss': 1.3942, 'grad_norm': 0.0021960831027695877, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:07<14:09, 3.66s/it] 56%|█████▌ | 289/520 [18:10<14:06, 3.66s/it] {'loss': 1.2527, 'grad_norm': 0.0019393198060258475, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:10<14:06, 3.66s/it] 56%|█████▌ | 290/520 [18:14<14:05, 3.67s/it] {'loss': 1.1675, 'grad_norm': 0.0019157940460551514, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:14<14:05, 3.67s/it] 56%|█████▌ | 291/520 [18:18<14:03, 3.69s/it] {'loss': 
1.2349, 'grad_norm': 0.0021830264682763194, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:18<14:03, 3.69s/it] 56%|█████▌ | 292/520 [18:21<14:01, 3.69s/it] {'loss': 1.2818, 'grad_norm': 0.0020271765658086164, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:21<14:01, 3.69s/it] 56%|█████▋ | 293/520 [18:25<13:56, 3.68s/it] {'loss': 1.224, 'grad_norm': 0.0022144340135014724, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:25<13:56, 3.68s/it] 57%|█████▋ | 294/520 [18:29<13:54, 3.69s/it] {'loss': 1.2436, 'grad_norm': 0.0021941512317308655, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:29<13:54, 3.69s/it] 57%|█████▋ | 295/520 [18:32<13:54, 3.71s/it] {'loss': 1.3356, 'grad_norm': 0.0023748459406431255, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:32<13:54, 3.71s/it] 57%|█████▋ | 296/520 [18:36<13:50, 3.71s/it] {'loss': 1.1949, 'grad_norm': 0.0021827966900343342, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:36<13:50, 3.71s/it] 57%|█████▋ | 297/520 [18:40<13:50, 3.73s/it] {'loss': 1.3241, 'grad_norm': 0.002181807570265519, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:40<13:50, 3.73s/it] 57%|█████▋ | 298/520 [18:44<13:46, 3.72s/it] {'loss': 1.2888, 'grad_norm': 0.0018170500243214162, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:44<13:46, 3.72s/it] 57%|█████▊ | 299/520 [18:47<13:48, 3.75s/it] {'loss': 1.3464, 'grad_norm': 0.001937088909708088, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:47<13:48, 3.75s/it] 58%|█████▊ | 300/520 [18:51<13:48, 3.76s/it] {'loss': 1.3459, 'grad_norm': 0.002043168565800871, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:51<13:48, 3.76s/it] 58%|█████▊ | 301/520 [18:55<13:45, 3.77s/it] {'loss': 1.3152, 'grad_norm': 0.002061795619409905, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:55<13:45, 3.77s/it] 58%|█████▊ | 302/520 [18:59<13:35, 3.74s/it] {'loss': 1.3699, 'grad_norm': 0.002152436953559832, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:59<13:35, 3.74s/it] 58%|█████▊ | 303/520 [19:02<13:25, 3.71s/it] {'loss': 1.2475, 'grad_norm': 0.0023867535632088595, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:02<13:25, 3.71s/it] 58%|█████▊ | 304/520 [19:06<13:17, 3.69s/it] {'loss': 1.2598, 'grad_norm': 0.0021727441806111344, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:06<13:17, 3.69s/it] 59%|█████▊ | 305/520 [19:10<13:17, 3.71s/it] {'loss': 1.3584, 'grad_norm': 0.0023293529761217024, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:10<13:17, 3.71s/it] 59%|█████▉ | 306/520 [19:13<13:10, 3.69s/it] {'loss': 1.3001, 'grad_norm': 0.0020417124688085113, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:13<13:10, 3.69s/it] 59%|█████▉ | 307/520 [19:17<13:29, 3.80s/it] {'loss': 1.234, 'grad_norm': 0.0018861664944504193, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:17<13:29, 3.80s/it] 59%|█████▉ | 308/520 [19:21<13:16, 3.76s/it] {'loss': 1.3551, 'grad_norm': 0.0020231891194545317, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:21<13:16, 3.76s/it] 59%|█████▉ | 309/520 [19:25<13:08, 3.74s/it] {'loss': 
1.2292, 'grad_norm': 0.001916162459429716, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:25<13:08, 3.74s/it] 60%|█████▉ | 310/520 [19:28<12:57, 3.70s/it] {'loss': 1.2038, 'grad_norm': 0.001979616632546856, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:28<12:57, 3.70s/it] 60%|█████▉ | 311/520 [19:32<12:51, 3.69s/it] {'loss': 1.1822, 'grad_norm': 0.001968676819158757, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:32<12:51, 3.69s/it] 60%|██████ | 312/520 [19:36<12:44, 3.67s/it] {'loss': 1.1708, 'grad_norm': 0.002172478659517267, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:36<12:44, 3.67s/it] 60%|██████ | 313/520 [19:39<12:38, 3.67s/it] {'loss': 1.1611, 'grad_norm': 0.0018391231275921966, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:39<12:38, 3.67s/it] 60%|██████ | 314/520 [19:43<13:04, 3.81s/it] {'loss': 1.203, 'grad_norm': 0.0018974323709152713, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:44<13:04, 3.81s/it] 61%|██████ | 315/520 [19:47<13:03, 3.82s/it] {'loss': 1.3209, 'grad_norm': 0.002537073920830591, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:47<13:03, 3.82s/it] 61%|██████ | 316/520 [19:52<13:25, 3.95s/it] {'loss': 1.1717, 'grad_norm': 0.0024919419725260915, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:52<13:25, 3.95s/it] 61%|██████ | 317/520 [19:55<13:14, 3.92s/it] {'loss': 1.1997, 'grad_norm': 0.0018283948137989108, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:55<13:14, 3.92s/it] 61%|██████ | 318/520 [19:59<13:07, 3.90s/it] {'loss': 1.3211, 'grad_norm': 0.0022502876487805338, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:59<13:07, 3.90s/it] 61%|██████▏ | 319/520 [20:04<13:26, 4.01s/it] {'loss': 1.1842, 'grad_norm': 0.002074264972673255, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:04<13:26, 4.01s/it] 62%|██████▏ | 320/520 [20:07<13:12, 3.96s/it] {'loss': 1.13, 'grad_norm': 0.0021540366749230647, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:07<13:12, 3.96s/it] 62%|██████▏ | 321/520 [20:11<13:00, 3.92s/it] {'loss': 1.33, 'grad_norm': 0.0021508954067566326, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:11<13:00, 3.92s/it] 62%|██████▏ | 322/520 [20:15<12:42, 3.85s/it] {'loss': 1.1862, 'grad_norm': 0.0020232413093536983, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:15<12:42, 3.85s/it] 62%|██████▏ | 323/520 [20:19<12:28, 3.80s/it] {'loss': 1.2612, 'grad_norm': 0.00219260178983091, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:19<12:28, 3.80s/it] 62%|██████▏ | 324/520 [20:22<12:22, 3.79s/it] {'loss': 1.2591, 'grad_norm': 0.0022208791033312613, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:22<12:22, 3.79s/it] 62%|██████▎ | 325/520 [20:26<12:34, 3.87s/it] {'loss': 1.2716, 'grad_norm': 0.0021568574135961377, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:26<12:34, 3.87s/it] 63%|██████▎ | 326/520 [20:30<12:18, 3.81s/it] {'loss': 1.2527, 'grad_norm': 0.0020555400504631925, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:30<12:18, 3.81s/it] 63%|██████▎ | 327/520 [20:34<12:04, 3.75s/it] 
{'loss': 1.3296, 'grad_norm': 0.0023257476508934823, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:34<12:04, 3.75s/it] 63%|██████▎ | 328/520 [20:37<11:54, 3.72s/it] {'loss': 1.3222, 'grad_norm': 0.0021387398587444013, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:37<11:54, 3.72s/it] 63%|██████▎ | 329/520 [20:41<11:45, 3.70s/it] {'loss': 1.1727, 'grad_norm': 0.0017923735254660343, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:41<11:45, 3.70s/it] 63%|██████▎ | 330/520 [20:45<11:39, 3.68s/it] {'loss': 1.252, 'grad_norm': 0.0018175524472766476, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:45<11:39, 3.68s/it] 64%|██████▎ | 331/520 [20:48<11:35, 3.68s/it] {'loss': 1.2092, 'grad_norm': 0.0019253705002589632, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:48<11:35, 3.68s/it] 64%|██████▍ | 332/520 [20:52<11:30, 3.67s/it] {'loss': 1.344, 'grad_norm': 0.0019693495552451563, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:52<11:30, 3.67s/it] 64%|██████▍ | 333/520 [20:56<11:26, 3.67s/it] {'loss': 1.3742, 'grad_norm': 0.0021645300456609915, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:56<11:26, 3.67s/it] 64%|██████▍ | 334/520 [20:59<11:22, 3.67s/it] {'loss': 1.2577, 'grad_norm': 0.0022624609216441625, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:59<11:22, 3.67s/it] 64%|██████▍ | 335/520 [21:03<11:22, 3.69s/it] {'loss': 1.2518, 'grad_norm': 0.001849787261060203, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:03<11:22, 3.69s/it] 65%|██████▍ | 336/520 [21:07<11:17, 3.68s/it] {'loss': 1.1432, 'grad_norm': 0.002193110420928903, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:07<11:17, 3.68s/it] 65%|██████▍ | 337/520 [21:10<11:14, 3.69s/it] {'loss': 1.1362, 'grad_norm': 0.0020037711586905213, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:10<11:14, 3.69s/it] 65%|██████▌ | 338/520 [21:14<11:09, 3.68s/it] {'loss': 1.2676, 'grad_norm': 0.0020547223087033356, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:14<11:09, 3.68s/it] 65%|██████▌ | 339/520 [21:18<11:07, 3.69s/it] {'loss': 1.2097, 'grad_norm': 0.002010798097896333, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:18<11:07, 3.69s/it] 65%|██████▌ | 340/520 [21:21<11:03, 3.69s/it] {'loss': 1.201, 'grad_norm': 0.0019698439967686934, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:21<11:03, 3.69s/it] 66%|██████▌ | 341/520 [21:25<10:58, 3.68s/it] {'loss': 1.2207, 'grad_norm': 0.002093554915901661, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:25<10:58, 3.68s/it] 66%|██████▌ | 342/520 [21:29<10:53, 3.67s/it] {'loss': 1.3122, 'grad_norm': 0.002370992550974762, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:29<10:53, 3.67s/it] 66%|██████▌ | 343/520 [21:32<10:50, 3.68s/it] {'loss': 1.2672, 'grad_norm': 0.002052263104833703, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:32<10:50, 3.68s/it] 66%|██████▌ | 344/520 [21:36<10:45, 3.67s/it] {'loss': 1.1703, 'grad_norm': 0.0021050444884468424, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:36<10:45, 3.67s/it] 
66%|██████▋ | 345/520 [21:40<10:43, 3.68s/it] {'loss': 1.2901, 'grad_norm': 0.0023465072563332114, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:40<10:43, 3.68s/it] 67%|██████▋ | 346/520 [21:43<10:40, 3.68s/it] {'loss': 1.2648, 'grad_norm': 0.0019087903522390925, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:44<10:40, 3.68s/it] 67%|██████▋ | 347/520 [21:47<10:36, 3.68s/it] {'loss': 1.1862, 'grad_norm': 0.0018361157648314958, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:47<10:36, 3.68s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:51<10:30, 3.67s/it] {'loss': 1.1479, 'grad_norm': 0.002478035765960154, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:51<10:30, 3.67s/it] 67%|██████▋ | 349/520 [21:54<10:26, 3.66s/it] {'loss': 1.1939, 'grad_norm': 0.002151411191055784, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:54<10:26, 3.66s/it] 67%|██████▋ | 350/520 [21:58<10:20, 3.65s/it] {'loss': 1.2323, 'grad_norm': 0.0021253494177697002, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:58<10:20, 3.65s/it] 68%|██████▊ | 351/520 [22:02<10:18, 3.66s/it] {'loss': 1.1379, 'grad_norm': 0.0018818218823849183, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:02<10:18, 3.66s/it] 68%|██████▊ | 352/520 [22:05<10:14, 3.66s/it] {'loss': 1.2576, 'grad_norm': 0.001941284635565753, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:05<10:14, 3.66s/it] 68%|██████▊ | 353/520 [22:09<10:10, 3.66s/it] {'loss': 1.2149, 'grad_norm': 0.0016867605662291398, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:09<10:10, 3.66s/it] 68%|██████▊ | 354/520 [22:13<10:08, 3.66s/it] {'loss': 1.351, 'grad_norm': 0.0020002044296684026, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:13<10:08, 3.66s/it] 68%|██████▊ | 355/520 [22:16<10:02, 3.65s/it] {'loss': 1.1976, 'grad_norm': 0.002099934787604769, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:16<10:02, 3.65s/it] 68%|██████▊ | 356/520 [22:20<09:58, 3.65s/it] {'loss': 1.2048, 'grad_norm': 0.0020638559721390596, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:20<09:58, 3.65s/it] 69%|██████▊ | 357/520 [22:24<09:55, 3.66s/it] {'loss': 1.2254, 'grad_norm': 0.0018588769806754642, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:24<09:55, 3.66s/it] 69%|██████▉ | 358/520 [22:27<09:52, 3.66s/it] {'loss': 1.1509, 'grad_norm': 0.0019348703169351058, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:27<09:52, 3.66s/it] 69%|██████▉ | 359/520 [22:31<09:47, 3.65s/it] {'loss': 1.2797, 'grad_norm': 0.0021745986048172846, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:31<09:47, 3.65s/it] 69%|██████▉ | 360/520 [22:35<09:43, 3.65s/it] {'loss': 1.3077, 'grad_norm': 0.0022193852431584052, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:35<09:43, 3.65s/it] 69%|██████▉ | 361/520 [22:38<09:39, 3.64s/it] {'loss': 1.2887, 'grad_norm': 0.001863251300740124, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 
361/520 [22:38<09:39, 3.64s/it] 70%|██████▉ | 362/520 [22:42<09:35, 3.64s/it] {'loss': 1.221, 'grad_norm': 0.0021504315710800886, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:42<09:35, 3.64s/it] 70%|██████▉ | 363/520 [22:46<09:31, 3.64s/it] {'loss': 1.2349, 'grad_norm': 0.0019425629711976875, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:46<09:31, 3.64s/it] 70%|███████ | 364/520 [22:49<09:28, 3.64s/it] {'loss': 1.303, 'grad_norm': 0.001970993149316419, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:49<09:28, 3.64s/it] 70%|███████ | 365/520 [22:53<09:24, 3.64s/it] {'loss': 1.3019, 'grad_norm': 0.0020710407912343065, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:53<09:24, 3.64s/it] 70%|███████ | 366/520 [22:56<09:21, 3.64s/it] {'loss': 1.2523, 'grad_norm': 0.0019064232166169377, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [22:56<09:21, 3.64s/it] 71%|███████ | 367/520 [23:00<09:18, 3.65s/it] {'loss': 1.2487, 'grad_norm': 0.00197583144118802, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:00<09:18, 3.65s/it] 71%|███████ | 368/520 [23:04<09:13, 3.64s/it] {'loss': 1.1053, 'grad_norm': 0.0021731717533512653, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:04<09:13, 3.64s/it] 71%|███████ | 369/520 [23:07<09:09, 3.64s/it] {'loss': 1.2698, 'grad_norm': 0.0020943716494660845, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:07<09:09, 3.64s/it] 71%|███████ | 370/520 [23:11<09:05, 3.64s/it] {'loss': 1.1688, 'grad_norm': 0.0018088768178374387, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:11<09:05, 3.64s/it] 71%|███████▏ | 371/520 [23:15<09:02, 3.64s/it] {'loss': 1.1641, 'grad_norm': 0.002065658178897873, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:15<09:02, 3.64s/it] 72%|███████▏ | 372/520 [23:18<08:58, 3.64s/it] {'loss': 1.3484, 'grad_norm': 0.001821210055196644, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:18<08:58, 3.64s/it] 72%|███████▏ | 373/520 [23:22<08:55, 3.64s/it] {'loss': 1.228, 'grad_norm': 0.0020924259604889793, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:22<08:55, 3.64s/it] 72%|███████▏ | 374/520 [23:26<08:51, 3.64s/it] {'loss': 1.2464, 'grad_norm': 0.001933145934190836, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:26<08:51, 3.64s/it] 72%|███████▏ | 375/520 [23:29<08:47, 3.64s/it] {'loss': 1.162, 'grad_norm': 0.0020133471355383713, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:29<08:47, 3.64s/it] 72%|███████▏ | 376/520 [23:33<08:45, 3.65s/it] {'loss': 1.2861, 'grad_norm': 0.002058031817210632, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:33<08:45, 3.65s/it] 72%|███████▎ | 377/520 [23:37<08:40, 3.64s/it] {'loss': 1.2195, 'grad_norm': 0.0020166976322983847, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:37<08:40, 3.64s/it] 73%|███████▎ | 378/520 [23:40<08:36, 3.63s/it] {'loss': 1.2701, 'grad_norm': 0.0019425205408959336, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:40<08:36, 3.63s/it] 73%|███████▎ | 379/520 [23:44<08:32, 3.64s/it] {'loss': 1.2485, 'grad_norm': 0.0018571634092386486, 'learning_rate': 
0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:44<08:32, 3.64s/it] 73%|███████▎ | 380/520 [23:47<08:29, 3.64s/it] {'loss': 1.341, 'grad_norm': 0.003706593974370449, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:47<08:29, 3.64s/it] 73%|███████▎ | 381/520 [23:51<08:26, 3.65s/it] {'loss': 1.2466, 'grad_norm': 0.0019034675119363428, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:51<08:26, 3.65s/it] 73%|███████▎ | 382/520 [23:55<08:23, 3.65s/it] {'loss': 1.2797, 'grad_norm': 0.002079234870388715, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:55<08:23, 3.65s/it] 74%|███████▎ | 383/520 [23:58<08:19, 3.65s/it] {'loss': 1.0866, 'grad_norm': 0.002090388354459422, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:58<08:19, 3.65s/it] 74%|███████▍ | 384/520 [24:02<08:15, 3.64s/it] {'loss': 1.3557, 'grad_norm': 0.0020766922965410614, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:02<08:15, 3.64s/it] 74%|███████▍ | 385/520 [24:06<08:20, 3.71s/it] {'loss': 1.2245, 'grad_norm': 0.0018281111573326613, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:06<08:20, 3.71s/it] 74%|███████▍ | 386/520 [24:10<08:23, 3.76s/it] {'loss': 1.1764, 'grad_norm': 0.001705256665596047, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:10<08:23, 3.76s/it] 74%|███████▍ | 387/520 [24:14<08:24, 3.80s/it] {'loss': 1.351, 'grad_norm': 0.0019150234425883213, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:14<08:24, 3.80s/it] 75%|███████▍ | 388/520 [24:18<08:24, 3.82s/it] {'loss': 1.1266, 'grad_norm': 0.0018562365547749766, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:18<08:24, 3.82s/it] 75%|███████▍ | 389/520 [24:21<08:23, 3.84s/it] {'loss': 1.1813, 'grad_norm': 0.0022599072981584496, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:21<08:23, 3.84s/it] 75%|███████▌ | 390/520 [24:25<08:22, 3.86s/it] {'loss': 1.2429, 'grad_norm': 0.0017985910208313301, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:25<08:22, 3.86s/it] 75%|███████▌ | 391/520 [24:29<08:19, 3.88s/it] {'loss': 1.3266, 'grad_norm': 0.0020502368238228636, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:29<08:19, 3.88s/it] 75%|███████▌ | 392/520 [24:33<08:15, 3.87s/it] {'loss': 1.1312, 'grad_norm': 0.001847435503144295, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:33<08:15, 3.87s/it] 76%|███████▌ | 393/520 [24:37<08:03, 3.80s/it] {'loss': 1.1691, 'grad_norm': 0.0017316684251936299, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:37<08:03, 3.80s/it] 76%|███████▌ | 394/520 [24:40<07:53, 3.76s/it] {'loss': 1.1946, 'grad_norm': 0.0020981613862670837, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:40<07:53, 3.76s/it] 76%|███████▌ | 395/520 [24:44<07:47, 3.74s/it][E ProcessGroupNCCL.cpp:474] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. 
+[E ProcessGroupNCCL.cpp:474] [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617705 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883880, OpType=_ALLGATHER_BASE, NumelIn=100352, NumelOut=802816, Timeout(ms)=1800000) ran for 2618374 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231324, OpType=_REDUCE_SCATTER_BASE, NumelIn=4358144, NumelOut=544768, Timeout(ms)=1800000) ran for 2618150 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231437, OpType=_REDUCE_SCATTER_BASE, NumelIn=4358144, NumelOut=544768, Timeout(ms)=1800000) ran for 2617845 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231324, OpType=_REDUCE_SCATTER_BASE, NumelIn=4358144, NumelOut=544768, Timeout(ms)=1800000) ran for 2618175 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231324, OpType=_REDUCE_SCATTER_BASE, NumelIn=4358144, NumelOut=544768, Timeout(ms)=1800000) ran for 2618161 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231470, OpType=ALLREDUCE, NumelIn=1, NumelOut=1, Timeout(ms)=1800000) ran for 2617714 milliseconds before timing out. +[E ProcessGroupNCCL.cpp:474] [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231470, OpType=ALLREDUCE, NumelIn=1, NumelOut=1, Timeout(ms)=1800000) ran for 2617713 milliseconds before timing out. 
+ywang29-vrdb-test1-worker-0:801879:809209 [4] NCCL INFO [Service thread] Connection closed by localRank 4 +ywang29-vrdb-test1-worker-0:801876:809218 [1] NCCL INFO [Service thread] Connection closed by localRank 1 +ywang29-vrdb-test1-worker-0:801877:809214 [2] NCCL INFO [Service thread] Connection closed by localRank 2 +ywang29-vrdb-test1-worker-0:801882:809223 [7] NCCL INFO [Service thread] Connection closed by localRank 7 +ywang29-vrdb-test1-worker-0:801878:803523 [3] NCCL INFO [Service thread] Connection closed by localRank 3 +ywang29-vrdb-test1-worker-0:801876:803520 [1] NCCL INFO [Service thread] Connection closed by localRank 1 +ywang29-vrdb-test1-worker-0:801879:803526 [4] NCCL INFO [Service thread] Connection closed by localRank 4 +ywang29-vrdb-test1-worker-0:801877:803515 [2] NCCL INFO [Service thread] Connection closed by localRank 2 +ywang29-vrdb-test1-worker-0:801878:809213 [3] NCCL INFO [Service thread] Connection closed by localRank 3 +[E ProcessGroupNCCL.cpp:474] [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=231324, OpType=_REDUCE_SCATTER_BASE, NumelIn=4358144, NumelOut=544768, Timeout(ms)=1800000) ran for 2618178 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801881:809211 [6] NCCL INFO [Service thread] Connection closed by localRank 6 +[E ProcessGroupNCCL.cpp:474] [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883889, OpType=_ALLGATHER_BASE, NumelIn=544768, NumelOut=4358144, Timeout(ms)=1800000) ran for 2618381 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801881:803521 [6] NCCL INFO [Service thread] Connection closed by localRank 6 +ywang29-vrdb-test1-worker-0:801882:803514 [7] NCCL INFO [Service thread] Connection closed by localRank 7 +ywang29-vrdb-test1-worker-0:801880:809221 [5] NCCL INFO [Service thread] Connection closed by localRank 5 +ywang29-vrdb-test1-worker-0:801880:803527 [5] NCCL INFO [Service thread] Connection closed by localRank 5 +ywang29-vrdb-test1-worker-0:801882:802978 [7] NCCL INFO comm 0x5611c9a94ed0 rank 7 nranks 8 cudaDev 7 busId a01d0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 7] NCCL watchdog thread terminated with exception: [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 7] NCCL watchdog thread terminated with exception: [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801881:802993 [6] NCCL INFO comm 0x55f5f4aefea0 rank 6 nranks 8 cudaDev 6 busId a01c0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. 
+[E ProcessGroupNCCL.cpp:915] [Rank 6] NCCL watchdog thread terminated with exception: [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883880, OpType=_ALLGATHER_BASE, NumelIn=100352, NumelOut=802816, Timeout(ms)=1800000) ran for 2618374 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 6] NCCL watchdog thread terminated with exception: [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883880, OpType=_ALLGATHER_BASE, NumelIn=100352, NumelOut=802816, Timeout(ms)=1800000) ran for 2618374 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801877:802991 [2] NCCL INFO comm 0x55ba4a86d490 rank 2 nranks 8 cudaDev 2 busId 201c0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 2] NCCL watchdog thread terminated with exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 2] NCCL watchdog thread terminated with exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801880:802975 [5] NCCL INFO comm 0x55d5dd1b6c00 rank 5 nranks 8 cudaDev 5 busId 901d0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 5] NCCL watchdog thread terminated with exception: [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883889, OpType=_ALLGATHER_BASE, NumelIn=544768, NumelOut=4358144, Timeout(ms)=1800000) ran for 2618381 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 5] NCCL watchdog thread terminated with exception: [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883889, OpType=_ALLGATHER_BASE, NumelIn=544768, NumelOut=4358144, Timeout(ms)=1800000) ran for 2618381 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801876:802977 [1] NCCL INFO comm 0x55a6c5aca810 rank 1 nranks 8 cudaDev 1 busId 101d0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 1] NCCL watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. 
+terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 1] NCCL watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801879:802992 [4] NCCL INFO comm 0x5612336c7110 rank 4 nranks 8 cudaDev 4 busId 901c0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 4] NCCL watchdog thread terminated with exception: [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617705 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 4] NCCL watchdog thread terminated with exception: [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617705 milliseconds before timing out. +ywang29-vrdb-test1-worker-0:801878:802976 [3] NCCL INFO comm 0x557ce59c2940 rank 3 nranks 8 cudaDev 3 busId 201d0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 3] NCCL watchdog thread terminated with exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 3] NCCL watchdog thread terminated with exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=883935, OpType=_ALLGATHER_BASE, NumelIn=16, NumelOut=128, Timeout(ms)=1800000) ran for 2617706 milliseconds before timing out. 
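
Failure summary for the run above: training stalls at step 395/520 and, 30 minutes later, every pending collective hits the default watchdog limit (Timeout(ms)=1800000). Ranks 1-7 each report timeouts on two different communicators (SeqNum 883935 and SeqNum 231324); rank 0 (process 801875) never reports one, consistent with rank 0 being the process that stalled inside the collective. Each watchdog thread then throws std::runtime_error, which aborts its process, so the launcher below kills the remaining workers and the job exits with return code -6 (SIGABRT). If slow-but-progressing collectives, rather than a hard hang, were suspected, one hypothetical mitigation (not used in this run) would be to raise the process-group timeout; a minimal sketch:

    from datetime import timedelta
    import torch.distributed as dist

    # Hypothetical mitigation, not part of the logged run: raise the collective
    # timeout above the 1800000 ms (30 min) default that the watchdog enforced.
    dist.init_process_group(backend="nccl", timeout=timedelta(hours=2))

With the Hugging Face Trainer the equivalent knob is TrainingArguments(ddp_timeout=7200) (in seconds); a genuinely stuck rank, however, would only fail later, so the straggler still has to be diagnosed.
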
+[2025-10-13 14:39:07,095] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801875
+[2025-10-13 14:39:08,018] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801876
+[2025-10-13 14:39:10,584] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801877
+[2025-10-13 14:39:10,587] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801878
+[2025-10-13 14:39:10,589] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801879
+[2025-10-13 14:39:10,589] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801880
+[2025-10-13 14:39:10,591] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801881
+[2025-10-13 14:39:10,593] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 801882
+[2025-10-13 14:39:10,594] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = -6
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.3_2e-1_connector-3.0_2.3_2e-1_ablation_20251013_130305.log
+Timestamp: 2025-10-13 14:39:11
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_143914.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_143914.log
new file mode 100644
index 0000000000000000000000000000000000000000..bb09e8c4aa2c24806cb7b37f30157df6aef58576
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_143914.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_143914.log
+Timestamp: 2025-10-13 14:39:14
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 14:39:17,605] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 14:39:21,125] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 14:39:21,126] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0.
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 2.5 --temperature_mlp_text 2.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 2.5 --temperature_mlp_vision 2.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 2.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 14:39:23,728] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:24,745] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 14:39:24,745] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 14:39:24,745] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 14:39:24,745] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 14:39:24,746] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 14:39:24,746] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 14:39:24,746] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 14:39:24,748] [INFO] [launch.py:253:main] process 828753 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', 
'--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,750] [INFO] [launch.py:253:main] process 828754 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', 
'--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,752] [INFO] [launch.py:253:main] process 828755 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,755] [INFO] [launch.py:253:main] process 828756 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', 
'--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,757] [INFO] [launch.py:253:main] process 828757 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', 
'--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,759] [INFO] [launch.py:253:main] process 828758 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', 
'3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,761] [INFO] [launch.py:253:main] process 828759 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 14:39:24,763] [INFO] [launch.py:253:main] process 828760 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', 
'/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 14:39:31,682] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:31,685] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 14:39:32,308] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.5, 'temperature_mlp': 2.5, 'masked_layers': 
'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.5, + "temperature_mlp": 2.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:828753:828753 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828753:828753 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828753:828753 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828753:828753 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828753:828753 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828753:828753 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:828754:828754 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828754:828754 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828754:828754 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828754:828754 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828754:828754 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828754:828754 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828756:828756 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828756:828756 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828756:828756 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828756:828756 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828756:828756 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828756:828756 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828755:828755 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828755:828755 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828755:828755 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828755:828755 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828755:828755 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828755:828755 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828758:828758 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828758:828758 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828758:828758 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828758:828758 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828758:828758 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828758:828758 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO NET/IB : No device found. 
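
The repeated "Flash Attention 2.0 with a model not initialized on GPU" lines are transformers warning once per rank that a model requesting flash_attention_2 was instantiated on CPU; the FlashAttention kernels only run on CUDA, so the weights must be on the GPU before the first forward pass (under ZeRO-3, DeepSpeed performs that placement itself, which is why the warning is benign here). For reference, a sketch of the pattern the warning asks for, using the model name from the launch command above:

    import torch
    from transformers import AutoModelForCausalLM

    # Build on CPU with FlashAttention-2 requested, then move to the GPU
    # so the fused attention kernels can actually be used.
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2.5-0.5B",
        attn_implementation="flash_attention_2",
        torch_dtype=torch.bfloat16,
    )
    model.to("cuda")
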
+ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:828760:828760 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828760:828760 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828760:828760 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828760:828760 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828760:828760 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828760:828760 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:828757:828757 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828757:828757 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828757:828757 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828757:828757 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828757:828757 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828757:828757 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:828759:828759 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:828759:828759 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828759:828759 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828759:828759 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:828759:828759 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:828759:828759 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO NET/IB : No device found. 
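
The NCCL bootstrap lines show how transport was selected on this node: no libnccl-net.so plugin was found and no InfiniBand devices exist (NET/IB : No device found), so NCCL falls back to TCP sockets on eth0 for bootstrap and inter-node traffic, while the P2P/CUMEM lines further down confirm that intra-node GPU-to-GPU transfers use direct peer-to-peer paths. The interface choice comes from NCCL_SOCKET_IFNAME=eth in the environment, reported by launch.py earlier. A sketch of pinning the same environment before launch; the values mirror this log and should be adapted to the local fabric:

    import os

    # Restrict NCCL's socket transport to eth* interfaces, as in this run.
    os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")
    # Emit the same transport/topology INFO lines seen throughout this log.
    os.environ.setdefault("NCCL_DEBUG", "INFO")
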
+ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO ncclCommInitRank comm 0x5611bb1fb000 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO ncclCommInitRank comm 0x55b2da9aa950 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO ncclCommInitRank comm 0x559ff0405f00 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO ncclCommInitRank comm 0x56234865e6f0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO ncclCommInitRank comm 0x558b21e1c110 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO ncclCommInitRank comm 0x55eca8e514c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO ncclCommInitRank comm 0x560914306440 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO ncclCommInitRank comm 0x5598d252bd00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x848af8e6743468f6 - Init START +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Setting affinity for GPU 
3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO comm 0x5611bb1fb000 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO comm 0x559ff0405f00 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO comm 0x55eca8e514c0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO comm 0x56234865e6f0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO comm 0x55b2da9aa950 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO comm 0x5598d252bd00 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO comm 0x560914306440 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO comm 0x558b21e1c110 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 
[5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828759:830392 [6] 
NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828757:830391 [4] NCCL INFO ncclCommInitRank comm 0x5598d252bd00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828760:830390 [7] NCCL INFO ncclCommInitRank comm 0x56234865e6f0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828759:830392 [6] NCCL INFO ncclCommInitRank comm 0x5611bb1fb000 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828758:830373 [5] NCCL INFO ncclCommInitRank comm 0x559ff0405f00 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:828754:830370 [1] NCCL INFO ncclCommInitRank comm 0x55b2da9aa950 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828756:830371 [3] NCCL INFO ncclCommInitRank comm 0x560914306440 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828755:830372 [2] NCCL INFO ncclCommInitRank comm 0x558b21e1c110 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x848af8e6743468f6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:828753:830352 [0] NCCL INFO ncclCommInitRank comm 0x55eca8e514c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x848af8e6743468f6 - Init COMPLETE +[2025-10-13 14:40:03,777] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 14:40:05,546] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores:
Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 14:40:23,795 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 14:40:23,800 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 
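+
+[editor note] The NCCL lines above are ordinary NCCL_DEBUG=INFO output for single-node 8-GPU communicator setup: each "Channel NN/0 : A[a] -> B[b] via P2P/CUMEM/read" records one per-channel peer-to-peer link between local GPUs (P2P over buffers from CUDA's cuMem* API), and the "Connected all trees" / threadThresholds / channel-count summaries mark the end of ring and tree construction before each rank reports "ncclCommInitRank ... Init COMPLETE". A minimal sketch that produces this kind of trace is below; the script name and launch line are illustrative assumptions, not taken from this run.
+
+    # Launch (illustrative): NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_check.py
+    import os
+    import torch
+    import torch.distributed as dist
+
+    def main():
+        local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun
+        torch.cuda.set_device(local_rank)
+        # init_process_group with the nccl backend is what triggers the
+        # channel-setup and ncclCommInitRank lines seen above.
+        dist.init_process_group(backend="nccl")
+        # A first collective exercises the freshly built communicator.
+        t = torch.ones(1, device="cuda")
+        dist.all_reduce(t)
+        if dist.get_rank() == 0:
+            print(f"all_reduce across {dist.get_world_size()} ranks:", t.item())
+        dist.destroy_process_group()
+
+    if __name__ == "__main__":
+        main()
+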
+ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:828759:835388 [6] NCCL INFO ncclCommInitRank comm 0x7f9fd806b400 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828757:835383 [4] NCCL INFO ncclCommInitRank comm 0x7f2db406b430 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828756:835384 [3] NCCL INFO ncclCommInitRank comm 0x7f882c06b7d0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828753:835382 [0] NCCL INFO ncclCommInitRank comm 0x7f796806a610 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828760:835389 [7] NCCL INFO ncclCommInitRank comm 0x7fe9a406ab90 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828755:835385 [2] NCCL INFO ncclCommInitRank comm 0x7efb5c06ace0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828758:835386 [5] NCCL INFO ncclCommInitRank comm 0x7f6f6806b350 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xa7188b252ab4d52c - Init COMPLETE +ywang29-vrdb-test1-worker-0:828754:835387 [1] NCCL INFO ncclCommInitRank comm 0x7f156806b010 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xa7188b252ab4d52c - Init COMPLETE + 0%| | 1/520 [00:14<2:04:35, 14.40s/it] {'loss': 4.6062, 'grad_norm': 0.23044769289873876, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:04:35, 14.40s/it] 0%| | 2/520 [00:18<1:10:31, 8.17s/it] {'loss': 4.2073, 'grad_norm': 
0.21154669178455066, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:10:31, 8.17s/it] 1%| | 3/520 [00:22<53:14, 6.18s/it] {'loss': 2.4879, 'grad_norm': 0.04418982364064686, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:22<53:14, 6.18s/it] 1%| | 4/520 [00:25<45:03, 5.24s/it] {'loss': 2.0476, 'grad_norm': 0.020874144220696948, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<45:03, 5.24s/it] 1%| | 5/520 [00:29<40:37, 4.73s/it] {'loss': 2.0999, 'grad_norm': 0.029951588895708522, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<40:37, 4.73s/it] 1%| | 6/520 [00:33<37:52, 4.42s/it] {'loss': 1.8513, 'grad_norm': 0.016711145370278888, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:33<37:52, 4.42s/it] 1%|▏ | 7/520 [00:37<36:04, 4.22s/it] {'loss': 1.6559, 'grad_norm': 0.01262826814129607, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:37<36:04, 4.22s/it] 2%|▏ | 8/520 [00:41<36:31, 4.28s/it] {'loss': 1.6669, 'grad_norm': 0.007553788100023119, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:41<36:31, 4.28s/it] 2%|▏ | 9/520 [00:46<36:40, 4.31s/it] {'loss': 1.7146, 'grad_norm': 0.0075887747956428254, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:46<36:40, 4.31s/it] 2%|▏ | 10/520 [00:49<35:24, 4.17s/it] {'loss': 1.4981, 'grad_norm': 0.005761060079910854, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<35:24, 4.17s/it] 2%|▏ | 11/520 [00:53<34:18, 4.04s/it] {'loss': 1.6196, 'grad_norm': 0.00823485772182664, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<34:18, 4.04s/it] 2%|▏ | 12/520 [00:57<33:13, 3.92s/it] {'loss': 1.5659, 'grad_norm': 0.00568022277795478, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<33:13, 3.92s/it][2025-10-13 14:41:30,355] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<34:08, 4.04s/it] {'loss': 1.5434, 'grad_norm': 0.005676673027096606, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<34:08, 4.04s/it] 3%|▎ | 14/520 [01:05<33:03, 3.92s/it] {'loss': 1.5809, 'grad_norm': 0.005326558608569862, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:05<33:03, 3.92s/it] 3%|▎ | 15/520 [01:08<32:17, 3.84s/it] {'loss': 1.6075, 'grad_norm': 0.006161218919342323, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:08<32:17, 3.84s/it] 3%|▎ | 16/520 [01:12<31:42, 3.77s/it] {'loss': 1.5391, 'grad_norm': 0.004343385322744803, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:12<31:42, 3.77s/it] 3%|▎ | 17/520 [01:16<31:16, 3.73s/it] {'loss': 1.6176, 'grad_norm': 0.004764289250956966, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:16<31:16, 3.73s/it] 3%|▎ | 18/520 [01:19<30:58, 3.70s/it] {'loss': 1.4537, 'grad_norm': 0.004509421386295318, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:19<30:58, 3.70s/it] 4%|▎ | 19/520 [01:23<30:43, 3.68s/it] {'loss': 1.5771, 'grad_norm': 0.004488317024248462, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:23<30:43, 3.68s/it] 4%|▍ | 20/520 [01:27<30:33, 3.67s/it] {'loss': 1.4621, 'grad_norm': 0.00421532756868008, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:27<30:33, 3.67s/it] 4%|▍ | 21/520 [01:30<30:29, 3.67s/it] {'loss': 1.6037, 'grad_norm': 0.00593859372919428, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:30<30:29, 3.67s/it] 4%|▍ | 22/520 [01:34<30:21, 3.66s/it] {'loss': 1.5969, 'grad_norm': 0.0034638774804315667, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:34<30:21, 3.66s/it] 4%|▍ | 23/520 [01:38<30:14, 3.65s/it] {'loss': 1.5332, 'grad_norm': 0.004950974251979134, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:38<30:14, 3.65s/it] 5%|▍ | 24/520 [01:41<30:20, 3.67s/it] {'loss': 1.5242, 'grad_norm': 0.004321743785696882, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:41<30:20, 3.67s/it] 5%|▍ | 25/520 [01:45<30:12, 3.66s/it] {'loss': 1.5502, 'grad_norm': 0.004161208291882957, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:45<30:12, 3.66s/it] 5%|▌ | 26/520 [01:49<30:10, 3.66s/it] {'loss': 1.5357, 'grad_norm': 0.003641745618020627, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:49<30:10, 3.66s/it] 5%|▌ | 27/520 [01:52<30:00, 3.65s/it] {'loss': 1.4404, 'grad_norm': 0.0036684208781035095, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:52<30:00, 3.65s/it] 5%|▌ | 28/520 [01:56<29:54, 3.65s/it] {'loss': 1.4227, 'grad_norm': 0.003956558363442798, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:56<29:54, 3.65s/it] 6%|▌ | 29/520 [01:59<29:53, 3.65s/it] {'loss': 1.4445, 'grad_norm': 0.0033724841502708893, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [01:59<29:53, 3.65s/it] 6%|▌ | 30/520 [02:03<29:48, 3.65s/it] {'loss': 1.6202, 'grad_norm': 0.004472922806750697, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:03<29:48, 3.65s/it] 6%|▌ | 31/520 [02:07<29:45, 3.65s/it] {'loss': 1.4361, 'grad_norm': 0.002933275654201493, 'learning_rate': 
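+
+[editor note] The stage3.py warning above is DeepSpeed reporting that the PyTorch CUDA caching allocator had to flush between optimizer steps, and it suggests flushing explicitly on all ranks at the same time. Since this run drives training through a Trainer-style loop, one place such a flush could be hooked in is a callback; the sketch below is a hedged illustration of the warning's suggestion, with the class name and the 50-step interval chosen arbitrarily, not code from this repository.
+
+    # Hedged sketch of the mitigation the warning suggests; illustrative only.
+    from deepspeed.accelerator import get_accelerator
+    from transformers import TrainerCallback
+
+    class EmptyCacheCallback(TrainerCallback):
+        """Flush the CUDA allocator cache on every rank at the same step."""
+
+        def __init__(self, every_n_steps: int = 50):
+            self.every_n_steps = every_n_steps
+
+        def on_step_end(self, args, state, control, **kwargs):
+            # state.global_step advances identically on all ranks, so the
+            # flushes stay synchronized, which is what the warning asks for.
+            if state.global_step > 0 and state.global_step % self.every_n_steps == 0:
+                get_accelerator().empty_cache()
+
+    # Usage (illustrative): trainer.add_callback(EmptyCacheCallback(every_n_steps=50))
+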
0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<29:45, 3.65s/it] 6%|▌ | 32/520 [02:10<29:49, 3.67s/it] {'loss': 1.6005, 'grad_norm': 0.006795869994842586, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:10<29:49, 3.67s/it] 6%|▋ | 33/520 [02:14<30:08, 3.71s/it] {'loss': 1.4486, 'grad_norm': 0.004153414048607688, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:14<30:08, 3.71s/it] 7%|▋ | 34/520 [02:18<30:13, 3.73s/it] {'loss': 1.4272, 'grad_norm': 0.004804147588835546, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:18<30:13, 3.73s/it] 7%|▋ | 35/520 [02:22<30:18, 3.75s/it] {'loss': 1.4638, 'grad_norm': 0.005196557147556292, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:22<30:18, 3.75s/it] 7%|▋ | 36/520 [02:26<30:18, 3.76s/it] {'loss': 1.5631, 'grad_norm': 0.003382097570337474, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:26<30:18, 3.76s/it] 7%|▋ | 37/520 [02:29<29:49, 3.71s/it] {'loss': 1.6211, 'grad_norm': 0.006271010911132227, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:29<29:49, 3.71s/it] 7%|▋ | 38/520 [02:33<29:36, 3.69s/it] {'loss': 1.6405, 'grad_norm': 0.004674480258280743, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:33<29:36, 3.69s/it] 8%|▊ | 39/520 [02:36<29:21, 3.66s/it] {'loss': 1.4603, 'grad_norm': 0.0034352910664089084, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:36<29:21, 3.66s/it] 8%|▊ | 40/520 [02:40<29:10, 3.65s/it] {'loss': 1.5029, 'grad_norm': 0.0040631393354664735, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:40<29:10, 3.65s/it] 8%|▊ | 41/520 [02:44<29:05, 3.64s/it] {'loss': 1.469, 'grad_norm': 0.0031886975040331393, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:44<29:05, 3.64s/it] 8%|▊ | 42/520 [02:47<28:56, 3.63s/it] {'loss': 1.5129, 'grad_norm': 0.0045128663062959295, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:47<28:56, 3.63s/it] 8%|▊ | 43/520 [02:51<28:51, 3.63s/it] {'loss': 1.4812, 'grad_norm': 0.0049877485399815406, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:51<28:51, 3.63s/it] 8%|▊ | 44/520 [02:55<28:45, 3.63s/it] {'loss': 1.5976, 'grad_norm': 0.004077381203999212, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:55<28:45, 3.63s/it] 9%|▊ | 45/520 [02:58<28:40, 3.62s/it] {'loss': 1.5058, 'grad_norm': 0.005458820502526897, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:58<28:40, 3.62s/it] 9%|▉ | 46/520 [03:02<28:35, 3.62s/it] {'loss': 1.6602, 'grad_norm': 0.003681966716570326, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:02<28:35, 3.62s/it] 9%|▉ | 47/520 [03:05<28:33, 3.62s/it] {'loss': 1.49, 'grad_norm': 0.005573337765033339, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<28:33, 3.62s/it] 9%|▉ | 48/520 [03:09<28:30, 3.62s/it] {'loss': 1.4572, 'grad_norm': 0.003298254313073711, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<28:30, 3.62s/it] 9%|▉ | 49/520 [03:13<28:27, 3.63s/it] {'loss': 1.4938, 'grad_norm': 0.004032921825276475, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:13<28:27, 3.63s/it] 10%|▉ | 50/520 [03:16<28:24, 3.63s/it] {'loss': 1.4889, 'grad_norm': 0.00306068922059552, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:16<28:24, 3.63s/it] 
10%|▉ | 51/520 [03:20<28:16, 3.62s/it] {'loss': 1.4159, 'grad_norm': 0.0038797474327856903, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:20<28:16, 3.62s/it] 10%|█ | 52/520 [03:24<28:14, 3.62s/it] {'loss': 1.5479, 'grad_norm': 0.005344001101699629, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:24<28:14, 3.62s/it] 10%|█ | 53/520 [03:27<28:09, 3.62s/it] {'loss': 1.5418, 'grad_norm': 0.0032247672988122973, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:27<28:09, 3.62s/it] 10%|█ | 54/520 [03:31<28:04, 3.61s/it] {'loss': 1.4366, 'grad_norm': 0.004578072284573, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<28:04, 3.61s/it] 11%|█ | 55/520 [03:34<28:02, 3.62s/it] {'loss': 1.4147, 'grad_norm': 0.0034419940818569577, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:34<28:02, 3.62s/it] 11%|█ | 56/520 [03:38<27:59, 3.62s/it] {'loss': 1.5499, 'grad_norm': 0.0036379993706147622, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:38<27:59, 3.62s/it] 11%|█ | 57/520 [03:42<27:54, 3.62s/it] {'loss': 1.4244, 'grad_norm': 0.005912411114323071, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<27:54, 3.62s/it] 11%|█ | 58/520 [03:45<27:48, 3.61s/it] {'loss': 1.5611, 'grad_norm': 0.002691741758833126, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:45<27:48, 3.61s/it] 11%|█▏ | 59/520 [03:49<27:44, 3.61s/it] {'loss': 1.4631, 'grad_norm': 0.006861287183405804, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:49<27:44, 3.61s/it] 12%|█▏ | 60/520 [03:52<27:37, 3.60s/it] {'loss': 1.5006, 'grad_norm': 0.005906036326180401, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:52<27:37, 3.60s/it] 12%|█▏ | 61/520 [03:56<27:35, 3.61s/it] {'loss': 1.7734, 'grad_norm': 0.013162330097676589, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:56<27:35, 3.61s/it] 12%|█▏ | 62/520 [04:00<27:29, 3.60s/it] {'loss': 1.4574, 'grad_norm': 0.004055262792502595, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:00<27:29, 3.60s/it] 12%|█▏ | 63/520 [04:03<27:27, 3.61s/it] {'loss': 1.4516, 'grad_norm': 0.004515645885562297, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:03<27:27, 3.61s/it] 12%|█▏ | 64/520 [04:07<27:24, 3.61s/it] {'loss': 1.4861, 'grad_norm': 0.0029057459560582768, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:07<27:24, 3.61s/it] 12%|█▎ | 65/520 [04:10<27:21, 3.61s/it] {'loss': 1.4924, 'grad_norm': 0.003392393276767025, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:10<27:21, 3.61s/it] 13%|█▎ | 66/520 [04:14<27:16, 3.60s/it] {'loss': 1.4478, 'grad_norm': 0.004405900792520624, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:14<27:16, 3.60s/it] 13%|█▎ | 67/520 [04:18<27:18, 3.62s/it] {'loss': 1.34, 'grad_norm': 0.002717081578674046, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:18<27:18, 3.62s/it] 13%|█▎ | 68/520 [04:21<27:13, 3.61s/it] {'loss': 1.3826, 'grad_norm': 0.002837726491537758, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:21<27:13, 3.61s/it] 13%|█▎ | 69/520 [04:25<27:10, 3.62s/it] {'loss': 1.3786, 'grad_norm': 0.004469951405028481, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:25<27:10, 3.62s/it] 13%|█▎ | 70/520 
[04:29<27:12, 3.63s/it] {'loss': 1.4177, 'grad_norm': 0.002899595835153646, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:29<27:12, 3.63s/it] 14%|█▎ | 71/520 [04:32<27:06, 3.62s/it] {'loss': 1.3469, 'grad_norm': 0.0031334395567862777, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:32<27:06, 3.62s/it] 14%|█▍ | 72/520 [04:36<26:59, 3.62s/it] {'loss': 1.4912, 'grad_norm': 0.003584873633666236, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:36<26:59, 3.62s/it] 14%|█▍ | 73/520 [04:39<26:55, 3.61s/it] {'loss': 1.3188, 'grad_norm': 0.00244283220495815, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:39<26:55, 3.61s/it] 14%|█▍ | 74/520 [04:43<26:52, 3.62s/it] {'loss': 1.4395, 'grad_norm': 0.0027662092756288417, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:43<26:52, 3.62s/it] 14%|█▍ | 75/520 [04:47<26:52, 3.62s/it] {'loss': 1.3378, 'grad_norm': 0.0031032319166251314, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:47<26:52, 3.62s/it] 15%|█▍ | 76/520 [04:50<26:47, 3.62s/it] {'loss': 1.6477, 'grad_norm': 0.0037304238226317198, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:50<26:47, 3.62s/it] 15%|█▍ | 77/520 [04:54<26:42, 3.62s/it] {'loss': 1.2715, 'grad_norm': 0.0033202747685776826, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:54<26:42, 3.62s/it] 15%|█▌ | 78/520 [04:57<26:38, 3.62s/it] {'loss': 1.3921, 'grad_norm': 0.0030787489423379963, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [04:57<26:38, 3.62s/it] 15%|█▌ | 79/520 [05:01<26:33, 3.61s/it] {'loss': 1.3721, 'grad_norm': 0.0025527141312045045, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:01<26:33, 3.61s/it] 15%|█▌ | 80/520 [05:05<26:31, 3.62s/it] {'loss': 1.6617, 'grad_norm': 0.0037048349826748752, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:05<26:31, 3.62s/it] 16%|█▌ | 81/520 [05:08<26:29, 3.62s/it] {'loss': 1.5279, 'grad_norm': 0.004003173448693961, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:08<26:29, 3.62s/it] 16%|█▌ | 82/520 [05:12<26:24, 3.62s/it] {'loss': 1.4464, 'grad_norm': 0.002708509224908319, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:12<26:24, 3.62s/it] 16%|█▌ | 83/520 [05:16<26:23, 3.62s/it] {'loss': 1.4692, 'grad_norm': 0.002869889561498633, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:16<26:23, 3.62s/it] 16%|█▌ | 84/520 [05:19<26:45, 3.68s/it] {'loss': 1.4745, 'grad_norm': 0.0036736476449872164, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:19<26:45, 3.68s/it] 16%|█▋ | 85/520 [05:23<26:59, 3.72s/it] {'loss': 1.4916, 'grad_norm': 0.0028278505488070803, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:23<26:59, 3.72s/it] 17%|█▋ | 86/520 [05:27<27:10, 3.76s/it] {'loss': 1.5157, 'grad_norm': 0.003241625347421959, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:27<27:10, 3.76s/it] 17%|█▋ | 87/520 [05:31<27:16, 3.78s/it] {'loss': 1.5666, 'grad_norm': 0.0032338232082835495, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:31<27:16, 3.78s/it] 17%|█▋ | 88/520 [05:35<27:22, 3.80s/it] {'loss': 1.5617, 'grad_norm': 0.0037483292947176053, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:35<27:22, 3.80s/it] 17%|█▋ | 
89/520 [05:39<27:21, 3.81s/it] {'loss': 1.4584, 'grad_norm': 0.0029225538527914274, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:39<27:21, 3.81s/it] 17%|█▋ | 90/520 [05:42<27:19, 3.81s/it] {'loss': 1.3848, 'grad_norm': 0.0026551413447586415, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:42<27:19, 3.81s/it] 18%|█▊ | 91/520 [05:46<27:16, 3.82s/it] {'loss': 1.4572, 'grad_norm': 0.002446652577218155, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:46<27:16, 3.82s/it] 18%|█▊ | 92/520 [05:50<27:15, 3.82s/it] {'loss': 1.4036, 'grad_norm': 0.002594695354243686, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:50<27:15, 3.82s/it] 18%|█▊ | 93/520 [05:54<26:51, 3.77s/it] {'loss': 1.409, 'grad_norm': 0.0028525731981404287, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:54<26:51, 3.77s/it] 18%|█▊ | 94/520 [05:57<26:33, 3.74s/it] {'loss': 1.5141, 'grad_norm': 0.0031881881303094757, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [05:57<26:33, 3.74s/it] 18%|█▊ | 95/520 [06:01<26:13, 3.70s/it] {'loss': 1.3942, 'grad_norm': 0.0031822945572043144, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:01<26:13, 3.70s/it] 18%|█▊ | 96/520 [06:05<25:59, 3.68s/it] {'loss': 1.4015, 'grad_norm': 0.0024040184420186165, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:05<25:59, 3.68s/it] 19%|█▊ | 97/520 [06:08<25:51, 3.67s/it] {'loss': 1.3742, 'grad_norm': 0.002967794190792272, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:08<25:51, 3.67s/it] 19%|█▉ | 98/520 [06:12<25:43, 3.66s/it] {'loss': 1.3642, 'grad_norm': 0.0022866866226349952, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:12<25:43, 3.66s/it] 19%|█▉ | 99/520 [06:16<25:37, 3.65s/it] {'loss': 1.3935, 'grad_norm': 0.0027735691277427756, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:16<25:37, 3.65s/it] 19%|█▉ | 100/520 [06:19<25:30, 3.64s/it] {'loss': 1.5326, 'grad_norm': 0.00535870302967845, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:19<25:30, 3.64s/it] 19%|█▉ | 101/520 [06:23<25:23, 3.64s/it] {'loss': 1.3833, 'grad_norm': 0.0025736044610295767, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:23<25:23, 3.64s/it] 20%|█▉ | 102/520 [06:26<25:17, 3.63s/it] {'loss': 1.389, 'grad_norm': 0.002691196036988077, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:26<25:17, 3.63s/it] 20%|█▉ | 103/520 [06:30<25:12, 3.63s/it] {'loss': 1.3151, 'grad_norm': 0.002361842280204309, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:30<25:12, 3.63s/it] 20%|██ | 104/520 [06:34<25:07, 3.62s/it] {'loss': 1.396, 'grad_norm': 0.0025485782545504594, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:34<25:07, 3.62s/it] 20%|██ | 105/520 [06:37<25:11, 3.64s/it] {'loss': 1.3846, 'grad_norm': 0.002239668049335682, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:37<25:11, 3.64s/it] 20%|██ | 106/520 [06:41<25:07, 3.64s/it] {'loss': 1.4788, 'grad_norm': 0.0026549548801027513, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:41<25:07, 3.64s/it] 21%|██ | 107/520 [06:45<25:02, 3.64s/it] {'loss': 1.4641, 'grad_norm': 0.003331625720039148, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:45<25:02, 
3.64s/it] 21%|██ | 108/520 [06:48<25:01, 3.64s/it] {'loss': 1.3405, 'grad_norm': 0.002680071806877089, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:48<25:01, 3.64s/it] 21%|██ | 109/520 [06:52<24:56, 3.64s/it] {'loss': 1.43, 'grad_norm': 0.002355084623797403, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:52<24:56, 3.64s/it] 21%|██ | 110/520 [06:56<24:55, 3.65s/it] {'loss': 1.5409, 'grad_norm': 0.002572519559259541, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [06:56<24:55, 3.65s/it] 21%|██▏ | 111/520 [06:59<24:50, 3.65s/it] {'loss': 1.5437, 'grad_norm': 0.002754690396314253, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [06:59<24:50, 3.65s/it] 22%|██▏ | 112/520 [07:03<24:43, 3.64s/it] {'loss': 1.4291, 'grad_norm': 0.0024931116156896587, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:03<24:43, 3.64s/it] 22%|██▏ | 113/520 [07:06<24:34, 3.62s/it] {'loss': 1.2889, 'grad_norm': 0.0021459563819420163, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:06<24:34, 3.62s/it] 22%|██▏ | 114/520 [07:10<24:30, 3.62s/it] {'loss': 1.3969, 'grad_norm': 0.00237889027384386, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:10<24:30, 3.62s/it] 22%|██▏ | 115/520 [07:14<24:46, 3.67s/it] {'loss': 1.5127, 'grad_norm': 0.002412737963397371, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:14<24:46, 3.67s/it] 22%|██▏ | 116/520 [07:18<24:57, 3.71s/it] {'loss': 1.5099, 'grad_norm': 0.0021319107410598735, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:18<24:57, 3.71s/it] 22%|██▎ | 117/520 [07:21<25:00, 3.72s/it] {'loss': 1.4925, 'grad_norm': 0.0026410715893894717, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:21<25:00, 3.72s/it] 23%|██▎ | 118/520 [07:25<25:02, 3.74s/it] {'loss': 1.3683, 'grad_norm': 0.0020578996776439507, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:25<25:02, 3.74s/it] 23%|██▎ | 119/520 [07:29<25:01, 3.74s/it] {'loss': 1.323, 'grad_norm': 0.0022241446323101395, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:29<25:01, 3.74s/it] 23%|██▎ | 120/520 [07:33<25:01, 3.75s/it] {'loss': 1.346, 'grad_norm': 0.002868117173331857, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:33<25:01, 3.75s/it] 23%|██▎ | 121/520 [07:36<24:57, 3.75s/it] {'loss': 1.4132, 'grad_norm': 0.002663167507406432, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:36<24:57, 3.75s/it] 23%|██▎ | 122/520 [07:40<24:55, 3.76s/it] {'loss': 1.3, 'grad_norm': 0.002267197807237074, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:40<24:55, 3.76s/it] 24%|██▎ | 123/520 [07:44<24:51, 3.76s/it] {'loss': 1.5108, 'grad_norm': 0.0027294446169487717, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:44<24:51, 3.76s/it] 24%|██▍ | 124/520 [07:48<24:50, 3.76s/it] {'loss': 1.3839, 'grad_norm': 0.0026856458207955647, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:48<24:50, 3.76s/it] 24%|██▍ | 125/520 [07:51<24:46, 3.76s/it] {'loss': 1.3726, 'grad_norm': 0.002611928723796333, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:51<24:46, 3.76s/it] 24%|██▍ | 126/520 [07:56<26:00, 3.96s/it] {'loss': 1.4213, 'grad_norm': 0.0020589078158833583, 'learning_rate': 
0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:56<26:00, 3.96s/it] 24%|██▍ | 127/520 [08:00<25:34, 3.90s/it] {'loss': 1.3478, 'grad_norm': 0.0029067364718335866, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:00<25:34, 3.90s/it] 25%|██▍ | 128/520 [08:03<25:14, 3.86s/it] {'loss': 1.406, 'grad_norm': 0.002440461342424874, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:03<25:14, 3.86s/it] 25%|██▍ | 129/520 [08:07<24:59, 3.83s/it] {'loss': 1.3237, 'grad_norm': 0.002228543319685248, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:07<24:59, 3.83s/it] 25%|██▌ | 130/520 [08:11<24:46, 3.81s/it] {'loss': 1.3875, 'grad_norm': 0.0022373233818152325, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:11<24:46, 3.81s/it] 25%|██▌ | 131/520 [08:15<24:42, 3.81s/it] {'loss': 1.3935, 'grad_norm': 0.002821541494486497, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:15<24:42, 3.81s/it] 25%|██▌ | 132/520 [08:18<24:31, 3.79s/it] {'loss': 1.4297, 'grad_norm': 0.0024319332606161947, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:19<24:31, 3.79s/it] 26%|██▌ | 133/520 [08:22<24:26, 3.79s/it] {'loss': 1.3323, 'grad_norm': 0.002325255767285862, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:22<24:26, 3.79s/it] 26%|██▌ | 134/520 [08:26<24:24, 3.79s/it] {'loss': 1.4242, 'grad_norm': 0.0027665363223171938, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:26<24:24, 3.79s/it] 26%|██▌ | 135/520 [08:30<24:16, 3.78s/it] {'loss': 1.4981, 'grad_norm': 0.002368160830918402, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:30<24:16, 3.78s/it] 26%|██▌ | 136/520 [08:34<24:09, 3.78s/it] {'loss': 1.4175, 'grad_norm': 0.002518014286324827, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:34<24:09, 3.78s/it] 26%|██▋ | 137/520 [08:37<24:04, 3.77s/it] {'loss': 1.34, 'grad_norm': 0.0026072583537331373, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:37<24:04, 3.77s/it] 27%|██▋ | 138/520 [08:41<23:59, 3.77s/it] {'loss': 1.3368, 'grad_norm': 0.002163548768077282, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:41<23:59, 3.77s/it] 27%|██▋ | 139/520 [08:45<23:54, 3.77s/it] {'loss': 1.3002, 'grad_norm': 0.0030105682408722674, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:45<23:54, 3.77s/it] 27%|██▋ | 140/520 [08:49<23:51, 3.77s/it] {'loss': 1.4349, 'grad_norm': 0.0023710325257252605, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:49<23:51, 3.77s/it] 27%|██▋ | 141/520 [08:52<23:48, 3.77s/it] {'loss': 1.4584, 'grad_norm': 0.002145055138617807, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:52<23:48, 3.77s/it] 27%|██▋ | 142/520 [08:56<23:46, 3.77s/it] {'loss': 1.474, 'grad_norm': 0.002212223839350567, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:56<23:46, 3.77s/it] 28%|██▊ | 143/520 [09:00<23:41, 3.77s/it] {'loss': 1.3748, 'grad_norm': 0.002510249058619536, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:00<23:41, 3.77s/it] 28%|██▊ | 144/520 [09:04<23:41, 3.78s/it] {'loss': 1.3231, 'grad_norm': 0.00237388121148134, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:04<23:41, 3.78s/it] 28%|██▊ | 145/520 [09:08<23:39, 3.79s/it] 
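+
+[editor note] The learning_rate values in these entries are consistent with linear warmup to a 0.2 peak over the first 16 steps (0.0125 at step 1 up to 0.2 at step 16), followed by half-cosine decay over the remaining 504 of the 520 steps: for example step 44 gives 0.1 * (1 + cos(pi * 28/504)) = 0.19848077530122082, matching the logged value, and step 268, the halfway point of the decay, lands on 0.1. A small sketch of that schedule, inferred from the logged values rather than read from the training config:
+
+    import math
+
+    PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 0.2, 16, 520  # inferred from the log
+
+    def lr_at(step: int) -> float:
+        # Linear warmup, then the standard half-cosine decay to zero.
+        if step <= WARMUP_STEPS:
+            return PEAK_LR * step / WARMUP_STEPS
+        progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
+        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))
+
+    # Spot checks against logged values (agreement to float precision):
+    #   lr_at(17)  -> 0.1999980572931538
+    #   lr_at(44)  -> 0.19848077530122082
+    #   lr_at(145) -> 0.16937610560285418
+    #   lr_at(268) -> 0.1
+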
{'loss': 1.2615, 'grad_norm': 0.002199117653512814, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:08<23:39, 3.79s/it] 28%|██▊ | 146/520 [09:11<23:36, 3.79s/it] {'loss': 1.5111, 'grad_norm': 0.0023982153148719393, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:11<23:36, 3.79s/it] 28%|██▊ | 147/520 [09:15<23:36, 3.80s/it] {'loss': 1.3075, 'grad_norm': 0.0022540968432236243, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:15<23:36, 3.80s/it] 28%|██▊ | 148/520 [09:19<23:35, 3.81s/it] {'loss': 1.3441, 'grad_norm': 0.002216636194717081, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:19<23:35, 3.81s/it] 29%|██▊ | 149/520 [09:23<23:34, 3.81s/it] {'loss': 1.2895, 'grad_norm': 0.0022812660934899754, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:23<23:34, 3.81s/it] 29%|██▉ | 150/520 [09:27<23:37, 3.83s/it] {'loss': 1.5249, 'grad_norm': 0.002437295422730031, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:27<23:37, 3.83s/it] 29%|██▉ | 151/520 [09:31<23:32, 3.83s/it] {'loss': 1.3282, 'grad_norm': 0.002192403335487861, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:31<23:32, 3.83s/it] 29%|██▉ | 152/520 [09:34<23:28, 3.83s/it] {'loss': 1.2985, 'grad_norm': 0.002237790499783139, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:34<23:28, 3.83s/it] 29%|██▉ | 153/520 [09:38<23:22, 3.82s/it] {'loss': 1.333, 'grad_norm': 0.0022221694727749836, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:38<23:22, 3.82s/it] 30%|██▉ | 154/520 [09:42<23:18, 3.82s/it] {'loss': 1.4266, 'grad_norm': 0.0021687352555065592, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:42<23:18, 3.82s/it] 30%|██▉ | 155/520 [09:46<22:59, 3.78s/it] {'loss': 1.3314, 'grad_norm': 0.002229831574935595, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:46<22:59, 3.78s/it] 30%|███ | 156/520 [09:49<22:40, 3.74s/it] {'loss': 1.364, 'grad_norm': 0.0028086059558837446, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:49<22:40, 3.74s/it] 30%|███ | 157/520 [09:53<22:25, 3.71s/it] {'loss': 1.5065, 'grad_norm': 0.0024190149009587, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:53<22:25, 3.71s/it] 30%|███ | 158/520 [09:57<22:17, 3.69s/it] {'loss': 1.3354, 'grad_norm': 0.002561940473684683, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:57<22:17, 3.69s/it] 31%|███ | 159/520 [10:00<22:07, 3.68s/it] {'loss': 1.3742, 'grad_norm': 0.002293298379736356, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:00<22:07, 3.68s/it] 31%|███ | 160/520 [10:04<22:03, 3.68s/it] {'loss': 1.3944, 'grad_norm': 0.002312865915459973, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:04<22:03, 3.68s/it] 31%|███ | 161/520 [10:08<22:04, 3.69s/it] {'loss': 1.3712, 'grad_norm': 0.0021646973095666416, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:08<22:04, 3.69s/it] 31%|███ | 162/520 [10:11<21:58, 3.68s/it] {'loss': 1.4648, 'grad_norm': 0.0026465710816923928, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:11<21:58, 3.68s/it] 31%|███▏ | 163/520 [10:15<21:52, 3.68s/it] {'loss': 1.2547, 'grad_norm': 0.0029077376118634827, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 
163/520 [10:15<21:52, 3.68s/it] 32%|███▏ | 164/520 [10:19<21:47, 3.67s/it] {'loss': 1.2236, 'grad_norm': 0.0021610884843130396, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:19<21:47, 3.67s/it] 32%|███▏ | 165/520 [10:22<21:40, 3.66s/it] {'loss': 1.3616, 'grad_norm': 0.002109865851016254, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:22<21:40, 3.66s/it] 32%|███▏ | 166/520 [10:26<21:37, 3.66s/it] {'loss': 1.3613, 'grad_norm': 0.0024568113375826287, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:26<21:37, 3.66s/it] 32%|███▏ | 167/520 [10:30<21:32, 3.66s/it] {'loss': 1.3454, 'grad_norm': 0.002525436654919561, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:30<21:32, 3.66s/it] 32%|███▏ | 168/520 [10:33<21:27, 3.66s/it] {'loss': 1.2768, 'grad_norm': 0.0022423917280161576, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:33<21:27, 3.66s/it] 32%|███▎ | 169/520 [10:37<21:28, 3.67s/it] {'loss': 1.3635, 'grad_norm': 0.0021141104014690587, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:37<21:28, 3.67s/it] 33%|███▎ | 170/520 [10:41<21:24, 3.67s/it] {'loss': 1.354, 'grad_norm': 0.0027262221192267226, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:41<21:24, 3.67s/it] 33%|███▎ | 171/520 [10:44<21:24, 3.68s/it] {'loss': 1.292, 'grad_norm': 0.0024619934103045325, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:44<21:24, 3.68s/it] 33%|███▎ | 172/520 [10:48<21:24, 3.69s/it] {'loss': 1.3554, 'grad_norm': 0.0020861889070723544, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:48<21:24, 3.69s/it] 33%|███▎ | 173/520 [10:52<21:25, 3.70s/it] {'loss': 1.2965, 'grad_norm': 0.002264091542090425, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:52<21:25, 3.70s/it] 33%|███▎ | 174/520 [10:56<21:33, 3.74s/it] {'loss': 1.3674, 'grad_norm': 0.002527115925906912, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [10:56<21:33, 3.74s/it] 34%|███▎ | 175/520 [10:59<21:37, 3.76s/it] {'loss': 1.2639, 'grad_norm': 0.0021188495023451628, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [10:59<21:37, 3.76s/it] 34%|███▍ | 176/520 [11:03<21:37, 3.77s/it] {'loss': 1.4482, 'grad_norm': 0.0021797743177492516, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:03<21:37, 3.77s/it] 34%|███▍ | 177/520 [11:07<21:38, 3.79s/it] {'loss': 1.3139, 'grad_norm': 0.002593469632563015, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:07<21:38, 3.79s/it] 34%|███▍ | 178/520 [11:11<21:32, 3.78s/it] {'loss': 1.3399, 'grad_norm': 0.0023765263788671975, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:11<21:32, 3.78s/it] 34%|███▍ | 179/520 [11:15<21:32, 3.79s/it] {'loss': 1.4253, 'grad_norm': 0.0021523822768221013, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:15<21:32, 3.79s/it] 35%|███▍ | 180/520 [11:18<21:26, 3.79s/it] {'loss': 1.3356, 'grad_norm': 0.0023544731566700144, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:18<21:26, 3.79s/it] 35%|███▍ | 181/520 [11:22<21:24, 3.79s/it] {'loss': 1.3087, 'grad_norm': 0.0021811336735808938, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:22<21:24, 3.79s/it] 35%|███▌ | 182/520 [11:26<21:22, 3.79s/it] 
{'loss': 1.3181, 'grad_norm': 0.002307839250679543, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:26<21:22, 3.79s/it] 35%|███▌ | 183/520 [11:30<21:18, 3.79s/it] {'loss': 1.3496, 'grad_norm': 0.0021850942760041217, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:30<21:18, 3.79s/it] 35%|███▌ | 184/520 [11:34<21:14, 3.79s/it] {'loss': 1.2577, 'grad_norm': 0.002107492566205958, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:34<21:14, 3.79s/it] 36%|███▌ | 185/520 [11:37<21:10, 3.79s/it] {'loss': 1.4374, 'grad_norm': 0.002087820678077251, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:37<21:10, 3.79s/it] 36%|███▌ | 186/520 [11:41<21:10, 3.80s/it] {'loss': 1.2871, 'grad_norm': 0.0021290273039810004, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:41<21:10, 3.80s/it] 36%|███▌ | 187/520 [11:45<20:48, 3.75s/it] {'loss': 1.3018, 'grad_norm': 0.0026843497470683576, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:45<20:48, 3.75s/it] 36%|███▌ | 188/520 [11:48<20:29, 3.70s/it] {'loss': 1.3784, 'grad_norm': 0.002427804704800269, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:48<20:29, 3.70s/it] 36%|███▋ | 189/520 [11:52<20:16, 3.68s/it] {'loss': 1.3877, 'grad_norm': 0.0019825429308779406, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:52<20:16, 3.68s/it] 37%|███▋ | 190/520 [11:56<20:07, 3.66s/it] {'loss': 1.3065, 'grad_norm': 0.0024204354161982103, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:56<20:07, 3.66s/it] 37%|███▋ | 191/520 [11:59<20:00, 3.65s/it] {'loss': 1.2568, 'grad_norm': 0.0021153720651371287, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [11:59<20:00, 3.65s/it] 37%|███▋ | 192/520 [12:03<19:55, 3.64s/it] {'loss': 1.3442, 'grad_norm': 0.002172365119551109, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:03<19:55, 3.64s/it] 37%|███▋ | 193/520 [12:07<19:52, 3.65s/it] {'loss': 1.3813, 'grad_norm': 0.002532037641521933, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:07<19:52, 3.65s/it] 37%|███▋ | 194/520 [12:10<19:51, 3.65s/it] {'loss': 1.2544, 'grad_norm': 0.002180617135413538, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:10<19:51, 3.65s/it] 38%|███▊ | 195/520 [12:14<19:43, 3.64s/it] {'loss': 1.3636, 'grad_norm': 0.002236474961530773, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:14<19:43, 3.64s/it] 38%|███▊ | 196/520 [12:17<19:39, 3.64s/it] {'loss': 1.3314, 'grad_norm': 0.0023873785008953334, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:17<19:39, 3.64s/it] 38%|███▊ | 197/520 [12:21<19:41, 3.66s/it] {'loss': 1.2928, 'grad_norm': 0.002160430744542351, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:21<19:41, 3.66s/it] 38%|███▊ | 198/520 [12:25<19:40, 3.67s/it] {'loss': 1.3677, 'grad_norm': 0.0023437854560731616, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:25<19:40, 3.67s/it] 38%|███▊ | 199/520 [12:29<19:38, 3.67s/it] {'loss': 1.2766, 'grad_norm': 0.0023211639235344657, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:29<19:38, 3.67s/it] 38%|███▊ | 200/520 [12:32<19:36, 3.68s/it] {'loss': 1.312, 'grad_norm': 0.0022359983347700267, 'learning_rate': 
0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:32<19:36, 3.68s/it] 39%|███▊ | 201/520 [12:36<19:36, 3.69s/it] {'loss': 1.326, 'grad_norm': 0.0020264636204748933, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:36<19:36, 3.69s/it] 39%|███▉ | 202/520 [12:40<19:38, 3.70s/it] {'loss': 1.2727, 'grad_norm': 0.0021024781665521185, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:40<19:38, 3.70s/it] 39%|███▉ | 203/520 [12:43<19:29, 3.69s/it] {'loss': 1.3313, 'grad_norm': 0.002251602259264282, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:43<19:29, 3.69s/it] 39%|███▉ | 204/520 [12:47<19:25, 3.69s/it] {'loss': 1.3535, 'grad_norm': 0.0023288640332101086, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:47<19:25, 3.69s/it] 39%|███▉ | 205/520 [12:51<19:22, 3.69s/it] {'loss': 1.3377, 'grad_norm': 0.0022115758805100754, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:51<19:22, 3.69s/it] 40%|███▉ | 206/520 [12:54<19:18, 3.69s/it] {'loss': 1.3927, 'grad_norm': 0.0021260443593697992, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:54<19:18, 3.69s/it] 40%|███▉ | 207/520 [12:58<19:13, 3.69s/it] {'loss': 1.3132, 'grad_norm': 0.0020390546233874166, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [12:58<19:13, 3.69s/it] 40%|████ | 208/520 [13:02<19:08, 3.68s/it] {'loss': 1.3651, 'grad_norm': 0.0024791319880799662, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:02<19:08, 3.68s/it] 40%|████ | 209/520 [13:05<19:04, 3.68s/it] {'loss': 1.283, 'grad_norm': 0.002070878470397428, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:05<19:04, 3.68s/it] 40%|████ | 210/520 [13:09<19:00, 3.68s/it] {'loss': 1.3636, 'grad_norm': 0.002320708139632779, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:09<19:00, 3.68s/it] 41%|████ | 211/520 [13:13<18:57, 3.68s/it] {'loss': 1.3689, 'grad_norm': 0.001974799792312848, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:13<18:57, 3.68s/it] 41%|████ | 212/520 [13:16<18:53, 3.68s/it] {'loss': 1.3364, 'grad_norm': 0.0019972422361099933, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:16<18:53, 3.68s/it] 41%|████ | 213/520 [13:20<18:48, 3.68s/it] {'loss': 1.3082, 'grad_norm': 0.00262934438175871, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:20<18:48, 3.68s/it] 41%|████ | 214/520 [13:24<18:38, 3.66s/it] {'loss': 1.2942, 'grad_norm': 0.0022391597079420285, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:24<18:38, 3.66s/it] 41%|████▏ | 215/520 [13:27<18:42, 3.68s/it] {'loss': 1.2485, 'grad_norm': 0.002127064565085528, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:27<18:42, 3.68s/it] 42%|████▏ | 216/520 [13:31<18:46, 3.70s/it] {'loss': 1.2077, 'grad_norm': 0.002077670193006619, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:31<18:46, 3.70s/it] 42%|████▏ | 217/520 [13:35<18:50, 3.73s/it] {'loss': 1.3285, 'grad_norm': 0.002295307268881222, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:35<18:50, 3.73s/it] 42%|████▏ | 218/520 [13:39<18:48, 3.74s/it] {'loss': 1.3198, 'grad_norm': 0.0023048265895311447, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:39<18:48, 
3.74s/it]
+ 219/520 [13:42<18:36, 3.71s/it] {'loss': 1.3029, 'grad_norm': 0.0019069075369801314, 'learning_rate': 0.1300705799504273, 'epoch': 0.42}
+ 220/520 [13:46<18:29, 3.70s/it] {'loss': 1.3082, 'grad_norm': 0.002179608874128007, 'learning_rate': 0.12947551744109043, 'epoch': 0.42}
+ 221/520 [13:50<18:22, 3.69s/it] {'loss': 1.3317, 'grad_norm': 0.0022470548341017938, 'learning_rate': 0.128879309685963, 'epoch': 0.42}
+ 222/520 [13:53<18:17, 3.68s/it] {'loss': 1.2463, 'grad_norm': 0.0019971954953428937, 'learning_rate': 0.12828197985018275, 'epoch': 0.43}
+ 223/520 [13:57<18:12, 3.68s/it] {'loss': 1.236, 'grad_norm': 0.0019359753765001196, 'learning_rate': 0.12768355114248495, 'epoch': 0.43}
+ 224/520 [14:01<18:10, 3.68s/it] {'loss': 1.456, 'grad_norm': 0.003238708392027745, 'learning_rate': 0.12708404681430052, 'epoch': 0.43}
+ 225/520 [14:04<18:04, 3.68s/it] {'loss': 1.2606, 'grad_norm': 0.0020815397913798166, 'learning_rate': 0.1264834901588527, 'epoch': 0.43}
+ 226/520 [14:08<17:54, 3.66s/it] {'loss': 1.3589, 'grad_norm': 0.001961110194635548, 'learning_rate': 0.12588190451025208, 'epoch': 0.43}
+ 227/520 [14:12<17:49, 3.65s/it] {'loss': 1.3453, 'grad_norm': 0.0020058380919361967, 'learning_rate': 0.12527931324258976, 'epoch': 0.44}
+ 228/520 [14:15<17:47, 3.65s/it] {'loss': 1.4522, 'grad_norm': 0.0023189177798290436, 'learning_rate': 0.12467573976902935, 'epoch': 0.44}
+ 229/520 [14:19<17:37, 3.63s/it] {'loss': 1.317, 'grad_norm': 0.0019329014230795342, 'learning_rate': 0.12407120754089732, 'epoch': 0.44}
+ 230/520 [14:23<17:33, 3.63s/it] {'loss': 1.1998, 'grad_norm': 0.0021537209004649765, 'learning_rate': 0.12346574004677154, 'epoch': 0.44}
+ 231/520 [14:26<17:27, 3.63s/it] {'loss': 1.2634, 'grad_norm': 0.0018679014610460103, 'learning_rate': 0.12285936081156897, 'epoch': 0.44}
+ 232/520 [14:30<17:24, 3.63s/it] {'loss': 1.4697, 'grad_norm': 0.0023054496967337343, 'learning_rate': 0.12225209339563144, 'epoch': 0.45}
+ 233/520 [14:33<17:19, 3.62s/it] {'loss': 1.3453, 'grad_norm': 0.0023216219383365945, 'learning_rate': 0.12164396139381028, 'epoch': 0.45}
+ 234/520 [14:37<17:15, 3.62s/it] {'loss': 1.2095, 'grad_norm': 0.0020927441905645803, 'learning_rate': 0.12103498843454959, 'epoch': 0.45}
+ 235/520 [14:41<17:14, 3.63s/it] {'loss': 1.2653, 'grad_norm': 0.0021748463334451537, 'learning_rate': 0.12042519817896805, 'epoch': 0.45}
+ 236/520 [14:44<17:09, 3.62s/it] {'loss': 1.3719, 'grad_norm': 0.0019053798995679134, 'learning_rate': 0.11981461431993977, 'epoch': 0.45}
+ 237/520 [14:48<17:06, 3.63s/it] {'loss': 1.3415, 'grad_norm': 0.0020812261194433977, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 238/520 [14:52<17:02, 3.63s/it] {'loss': 1.2812, 'grad_norm': 0.0021861444111842586, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 239/520 [14:55<16:59, 3.63s/it] {'loss': 1.3773, 'grad_norm': 0.0021378634275657383, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 240/520 [14:59<16:55, 3.63s/it] {'loss': 1.152, 'grad_norm': 0.0022511735726534585, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 241/520 [15:02<16:51, 3.63s/it] {'loss': 1.2425, 'grad_norm': 0.0020413222184530026, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 242/520 [15:06<16:48, 3.63s/it] {'loss': 1.2656, 'grad_norm': 0.0019244118789421567, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 243/520 [15:10<16:44, 3.63s/it] {'loss': 1.2474, 'grad_norm': 0.0020327964768809582, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 244/520 [15:13<16:43, 3.64s/it] {'loss': 1.3815, 'grad_norm': 0.0021151376248155036, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 245/520 [15:17<16:54, 3.69s/it] {'loss': 1.2341, 'grad_norm': 0.0020417864390434035, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 246/520 [15:21<17:01, 3.73s/it] {'loss': 1.4406, 'grad_norm': 0.002229333408501363, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 247/520 [15:25<17:07, 3.76s/it] {'loss': 1.424, 'grad_norm': 0.0020765127091838358, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 248/520 [15:29<17:08, 3.78s/it] {'loss': 1.2444, 'grad_norm': 0.002153428767697166, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 249/520 [15:32<17:08, 3.79s/it] {'loss': 1.3414, 'grad_norm': 0.002060860519187018, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 250/520 [15:36<17:08, 3.81s/it] {'loss': 1.2767, 'grad_norm': 0.0022267948527886394, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 251/520 [15:40<17:06, 3.81s/it] {'loss': 1.3437, 'grad_norm': 0.0018716767871234507, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 252/520 [15:44<17:04, 3.82s/it] {'loss': 1.326, 'grad_norm': 0.002030441559102651, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 253/520 [15:48<17:04, 3.84s/it] {'loss': 1.3409, 'grad_norm': 0.002175507389237098, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 254/520 [15:52<17:02, 3.84s/it] {'loss': 1.2629, 'grad_norm': 0.001978091778157007, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 255/520 [15:56<17:02, 3.86s/it] {'loss': 1.2749, 'grad_norm': 0.0023056659695307297, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 256/520 [15:59<16:54, 3.84s/it] {'loss': 1.3211, 'grad_norm': 0.002206837428110686, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 257/520 [16:03<16:51, 3.85s/it] {'loss': 1.304, 'grad_norm': 0.002120436440624749, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 258/520 [16:07<16:48, 3.85s/it] {'loss': 1.317, 'grad_norm': 0.0018556801334997336, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 259/520 [16:11<16:43, 3.85s/it] {'loss': 1.3804, 'grad_norm': 0.0023593405182575225, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 260/520 [16:15<16:40, 3.85s/it] {'loss': 1.3994, 'grad_norm': 0.0018746689336401217, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 261/520 [16:19<16:35, 3.84s/it] {'loss': 1.3318, 'grad_norm': 0.0020557717776259737, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 262/520 [16:22<16:30, 3.84s/it] {'loss': 1.2285, 'grad_norm': 0.002078332872524269, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 263/520 [16:26<16:27, 3.84s/it] {'loss': 1.339, 'grad_norm': 0.002171874892116567, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 264/520 [16:30<16:23, 3.84s/it] {'loss': 1.3473, 'grad_norm': 0.002047828106306195, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 265/520 [16:34<16:24, 3.86s/it] {'loss': 1.2488, 'grad_norm': 0.0023706170470327737, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 266/520 [16:38<16:12, 3.83s/it] {'loss': 1.104, 'grad_norm': 0.0017691622330963889, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 267/520 [16:41<15:55, 3.78s/it] {'loss': 1.2425, 'grad_norm': 0.0019645521979066026, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 268/520 [16:45<15:47, 3.76s/it] {'loss': 1.4602, 'grad_norm': 0.002700568213574095, 'learning_rate': 0.1, 'epoch': 0.52}
+ 269/520 [16:49<15:36, 3.73s/it] {'loss': 1.3506, 'grad_norm': 0.0020813936202519166, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 270/520 [16:53<15:32, 3.73s/it] {'loss': 1.2622, 'grad_norm': 0.001987347445615377, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 271/520 [16:56<15:29, 3.73s/it] {'loss': 1.3412, 'grad_norm': 0.0021462548359150752, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 272/520 [17:00<15:23, 3.72s/it] {'loss': 1.281, 'grad_norm': 0.002210829156188893, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 273/520 [17:04<15:14, 3.70s/it] {'loss': 1.4235, 'grad_norm': 0.002778790199072734, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
+ 274/520 [17:07<15:08, 3.69s/it] {'loss': 1.2977, 'grad_norm': 0.002331618494071742, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 275/520 [17:11<15:02, 3.68s/it] {'loss': 1.2456, 'grad_norm': 0.0020719377884769884, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 276/520 [17:15<14:56, 3.67s/it] {'loss': 1.3233, 'grad_norm': 0.0022576912258441967, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 277/520 [17:18<14:56, 3.69s/it] {'loss': 1.4, 'grad_norm': 0.0022945690434602733, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 278/520 [17:22<14:52, 3.69s/it] {'loss': 1.1983, 'grad_norm': 0.002024470132071202, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 279/520 [17:26<14:49, 3.69s/it] {'loss': 1.2875, 'grad_norm': 0.0024491244711725433, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 280/520 [17:29<14:45, 3.69s/it] {'loss': 1.2459, 'grad_norm': 0.002359579832211512, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 281/520 [17:33<14:39, 3.68s/it] {'loss': 1.3567, 'grad_norm': 0.002219969359103508, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 282/520 [17:37<14:34, 3.67s/it] {'loss': 1.2066, 'grad_norm': 0.0019590824906629252, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 283/520 [17:40<14:29, 3.67s/it] {'loss': 1.3735, 'grad_norm': 0.0023105205071872713, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
+ 284/520 [17:44<14:26, 3.67s/it] {'loss': 1.2576, 'grad_norm': 0.002254858136432865, 'learning_rate': 0.09004321534041836, 'epoch': 0.55}
+ 285/520 [17:48<14:19, 3.66s/it] {'loss': 1.2393, 'grad_norm': 0.0021302289624230697, 'learning_rate': 0.08942317838840624, 'epoch': 0.55}
+ 286/520 [17:51<14:14, 3.65s/it] {'loss': 1.1064, 'grad_norm': 0.0021486488978916317, 'learning_rate': 0.08880355238966922, 'epoch': 0.55}
+ 287/520 [17:55<14:09, 3.64s/it] {'loss': 1.3501, 'grad_norm': 0.0022518331124868787, 'learning_rate': 0.08818436141924073, 'epoch': 0.55}
+ 288/520 [17:59<14:09, 3.66s/it] {'loss': 1.3918, 'grad_norm': 0.002209520254690369, 'learning_rate': 0.08756562953525152, 'epoch': 0.55}
+ 289/520 [18:02<14:04, 3.66s/it] {'loss': 1.2511, 'grad_norm': 0.0019360471322869366, 'learning_rate': 0.08694738077799487, 'epoch': 0.56}
+ 290/520 [18:06<14:02, 3.66s/it] {'loss': 1.1712, 'grad_norm': 0.001981691759042664, 'learning_rate': 0.08632963916899268, 'epoch': 0.56}
+ 291/520 [18:10<13:56, 3.65s/it] {'loss': 1.2325, 'grad_norm': 0.0021557412804455245, 'learning_rate': 0.08571242871006202, 'epoch': 0.56}
+ 292/520 [18:13<13:50, 3.64s/it] {'loss': 1.2793, 'grad_norm': 0.0020272998730880795, 'learning_rate': 0.08509577338238256, 'epoch': 0.56}
+ 293/520 [18:17<13:46, 3.64s/it] {'loss': 1.2189, 'grad_norm': 0.002232527511931201, 'learning_rate': 0.08447969714556484, 'epoch': 0.56}
+ 294/520 [18:21<13:44, 3.65s/it] {'loss': 1.2454, 'grad_norm': 0.0022610398543083457, 'learning_rate': 0.08386422393671933, 'epoch': 0.57}
+ 295/520 [18:24<13:40, 3.65s/it] {'loss': 1.3331, 'grad_norm': 0.0027445550071975343, 'learning_rate': 0.08324937766952638, 'epoch': 0.57}
+ 296/520 [18:28<13:43, 3.67s/it] {'loss': 1.1944, 'grad_norm': 0.0022013722284695792, 'learning_rate': 0.08263518223330697, 'epoch': 0.57}
+ 297/520 [18:32<13:48, 3.71s/it] {'loss': 1.3172, 'grad_norm': 0.0022217055797641946, 'learning_rate': 0.08202166149209474, 'epoch': 0.57}
+ 298/520 [18:36<13:51, 3.75s/it] {'loss': 1.2855, 'grad_norm': 0.0018354284119748926, 'learning_rate': 0.08140883928370855, 'epoch': 0.57}
+ 299/520 [18:39<13:53, 3.77s/it] {'loss': 1.345, 'grad_norm': 0.0020578800546031065, 'learning_rate': 0.0807967394188264, 'epoch': 0.57}
+ 300/520 [18:43<13:52, 3.79s/it] {'loss': 1.3402, 'grad_norm': 0.002056990040438072, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 301/520 [18:47<13:51, 3.80s/it] {'loss': 1.3137, 'grad_norm': 0.0021084308882082037, 'learning_rate': 0.07957480182103199, 'epoch': 0.58}
+ 302/520 [18:51<13:49, 3.81s/it] {'loss': 1.3696, 'grad_norm': 0.0020910079673125076, 'learning_rate': 0.07896501156545044, 'epoch': 0.58}
+ 303/520 [18:55<13:47, 3.82s/it] {'loss': 1.2462, 'grad_norm': 0.002274378462479428, 'learning_rate': 0.07835603860618973, 'epoch': 0.58}
+ 304/520 [18:59<13:45, 3.82s/it] {'loss': 1.2576, 'grad_norm': 0.002255555871998021, 'learning_rate': 0.07774790660436857, 'epoch': 0.58}
+ 305/520 [19:02<13:43, 3.83s/it] {'loss': 1.3556, 'grad_norm': 0.0023430745757280816, 'learning_rate': 0.07714063918843106, 'epoch': 0.59}
+ 306/520 [19:06<13:40, 3.84s/it] {'loss': 1.2931, 'grad_norm': 0.0019959230805895747, 'learning_rate': 0.0765342599532285, 'epoch': 0.59}
+ 307/520 [19:10<13:55, 3.92s/it] {'loss': 1.2302, 'grad_norm': 0.0018768387970636879, 'learning_rate': 0.07592879245910272, 'epoch': 0.59}
+ 308/520 [19:14<13:35, 3.85s/it] {'loss': 1.3505, 'grad_norm': 0.0020203295138830173, 'learning_rate': 0.07532426023097064, 'epoch': 0.59}
+ 309/520 [19:18<13:19, 3.79s/it] {'loss': 1.2291, 'grad_norm': 0.0019374873494105746, 'learning_rate': 0.07472068675741024, 'epoch': 0.59}
+ 310/520 [19:21<13:06, 3.74s/it] {'loss': 1.2054, 'grad_norm': 0.001991546731357845, 'learning_rate': 0.07411809548974792, 'epoch': 0.6}
+ 311/520 [19:25<12:57, 3.72s/it] {'loss': 1.1806, 'grad_norm': 0.0019900591673453886, 'learning_rate': 0.07351650984114727, 'epoch': 0.6}
+ 312/520 [19:29<12:53, 3.72s/it] {'loss': 1.1718, 'grad_norm': 0.002275137865040261, 'learning_rate': 0.0729159531856995, 'epoch': 0.6}
+ 313/520 [19:32<12:44, 3.69s/it] {'loss': 1.1643, 'grad_norm': 0.0018886372283955539, 'learning_rate': 0.07231644885751508, 'epoch': 0.6}
+ 314/520 [19:36<13:06, 3.82s/it] {'loss': 1.2036, 'grad_norm': 0.0018792592992448432, 'learning_rate': 0.07171802014981725, 'epoch': 0.6}
+ 315/520 [19:40<12:51, 3.77s/it] {'loss': 1.3241, 'grad_norm': 0.002789144514308743, 'learning_rate': 0.07112069031403703, 'epoch': 0.61}
+ 316/520 [19:44<13:06, 3.86s/it] {'loss': 1.1763, 'grad_norm': 0.0026404539341487888, 'learning_rate': 0.07052448255890957, 'epoch': 0.61}
+ 317/520 [19:48<12:50, 3.80s/it] {'loss': 1.1946, 'grad_norm': 0.001862924678781047, 'learning_rate': 0.0699294200495727, 'epoch': 0.61}
+ 318/520 [19:52<12:38, 3.76s/it] {'loss': 1.3196, 'grad_norm': 0.0022276496415412996, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 319/520 [19:55<12:48, 3.82s/it] {'loss': 1.1815, 'grad_norm': 0.0020320086117324872, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 320/520 [19:59<12:34, 3.77s/it] {'loss': 1.1249, 'grad_norm': 0.0022484281862216182, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 321/520 [20:03<12:22, 3.73s/it] {'loss': 1.3242, 'grad_norm': 0.0022381959337468478, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 322/520 [20:06<12:16, 3.72s/it] {'loss': 1.1875, 'grad_norm': 0.0019862493210157173, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 323/520 [20:10<12:07, 3.69s/it] {'loss': 1.2653, 'grad_norm': 0.002387523617163079, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 324/520 [20:14<12:01, 3.68s/it] {'loss': 1.2556, 'grad_norm': 0.0021903178067648596, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 325/520 [20:17<11:55, 3.67s/it] {'loss': 1.2744, 'grad_norm': 0.0022095603297405844, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 326/520 [20:21<11:49, 3.66s/it] {'loss': 1.2538, 'grad_norm': 0.0020506458003389043, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 327/520 [20:25<11:43, 3.65s/it] {'loss': 1.3367, 'grad_norm': 0.0024830509263629565, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 328/520 [20:28<11:40, 3.65s/it] {'loss': 1.3171, 'grad_norm': 0.0021313463306260705, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 329/520 [20:32<11:35, 3.64s/it] {'loss': 1.1745, 'grad_norm': 0.001779298216926677, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 330/520 [20:36<11:32, 3.64s/it] {'loss': 1.2543, 'grad_norm': 0.0018662527645317388, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 331/520 [20:39<11:28, 3.64s/it] {'loss': 1.2097, 'grad_norm': 0.0019335342618961287, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 332/520 [20:43<11:24, 3.64s/it] {'loss': 1.3463, 'grad_norm': 0.001987104415602791, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 333/520 [20:46<11:20, 3.64s/it] {'loss': 1.3682, 'grad_norm': 0.0021581666937927238, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 334/520 [20:50<11:17, 3.64s/it] {'loss': 1.2588, 'grad_norm': 0.002355078170743081, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 335/520 [20:54<11:13, 3.64s/it] {'loss': 1.2522, 'grad_norm': 0.0018068899237473177, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 336/520 [20:57<11:11, 3.65s/it] {'loss': 1.1458, 'grad_norm': 0.0022321256888792555, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 337/520 [21:01<11:06, 3.64s/it] {'loss': 1.1353, 'grad_norm': 0.002056772741693283, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 338/520 [21:05<11:03, 3.65s/it] {'loss': 1.2634, 'grad_norm': 0.002049685703722335, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 339/520 [21:08<11:06, 3.68s/it] {'loss': 1.2078, 'grad_norm': 0.00203079919158206, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 340/520 [21:12<11:04, 3.69s/it] {'loss': 1.1956, 'grad_norm': 0.0020443106705841547, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 341/520 [21:16<11:00, 3.69s/it] {'loss': 1.2205, 'grad_norm': 0.002135474322552299, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 342/520 [21:20<10:53, 3.67s/it] {'loss': 1.3097, 'grad_norm': 0.0023908025328521917, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 343/520 [21:23<10:50, 3.67s/it] {'loss': 1.2689, 'grad_norm': 0.0020786432426577804, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 344/520 [21:27<10:46, 3.67s/it] {'loss': 1.1661, 'grad_norm': 0.0021219243147779796, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
+ 345/520 [21:31<10:44, 3.69s/it] {'loss': 1.2923, 'grad_norm': 0.0023060952422809147, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 346/520 [21:34<10:42, 3.69s/it] {'loss': 1.266, 'grad_norm': 0.001912165463103485, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 347/520 [21:38<10:40, 3.70s/it] {'loss': 1.1875, 'grad_norm': 0.0018652907508148828, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 348/520 [21:42<10:37, 3.70s/it] {'loss': 1.1464, 'grad_norm': 0.0024210609731442926, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 349/520 [21:46<10:37, 3.73s/it] {'loss': 1.1894, 'grad_norm': 0.0021415425776843055, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 350/520 [21:49<10:38, 3.76s/it] {'loss': 1.2311, 'grad_norm': 0.002159864058946374, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 351/520 [21:53<10:38, 3.78s/it] {'loss': 1.138, 'grad_norm': 0.0018855835878876818, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 352/520 [21:57<10:31, 3.76s/it] {'loss': 1.2624, 'grad_norm': 0.00196100449780883, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 353/520 [22:01<10:32, 3.79s/it] {'loss': 1.2169, 'grad_norm': 0.0017536918522470571, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 354/520 [22:04<10:27, 3.78s/it] {'loss': 1.3475, 'grad_norm': 0.002008571585420781, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 355/520 [22:08<10:16, 3.74s/it] {'loss': 1.1986, 'grad_norm': 0.002080741057146105, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 356/520 [22:12<10:10, 3.72s/it] {'loss': 1.199, 'grad_norm': 0.0020896812314252153, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 357/520 [22:16<10:09, 3.74s/it] {'loss': 1.2236, 'grad_norm': 0.0018761014743681307, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 358/520 [22:19<10:04, 3.73s/it] {'loss': 1.1537, 'grad_norm': 0.001975087239486439, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 359/520 [22:23<09:59, 3.72s/it] {'loss': 1.284, 'grad_norm': 0.002175555893219577, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 360/520 [22:27<09:58, 3.74s/it] {'loss': 1.2995, 'grad_norm': 0.0024579677244252855, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 361/520 [22:30<09:51, 3.72s/it] {'loss': 1.2915, 'grad_norm': 0.001873296422523511, 'learning_rate': 0.04522281607821288, 'epoch': 0.69}
+ 362/520 [22:34<09:48, 3.72s/it] {'loss': 1.2188, 'grad_norm': 0.002120289416321264, 'learning_rate': 0.04470238686320606, 'epoch': 0.7}
+ 363/520 [22:38<09:42, 3.71s/it] {'loss': 1.2386, 'grad_norm': 0.0019423501275596, 'learning_rate': 0.044184106189231624, 'epoch': 0.7}
+ 364/520 [22:42<09:36, 3.70s/it] {'loss': 1.305, 'grad_norm': 0.0019592516074694692, 'learning_rate': 0.043667994193637795, 'epoch': 0.7}
+ 365/520 [22:45<09:30, 3.68s/it] {'loss': 1.298, 'grad_norm': 0.0020941782692286983, 'learning_rate': 0.043154070929510784, 'epoch': 0.7}
+ 366/520 [22:49<09:25, 3.67s/it] {'loss': 1.2514, 'grad_norm': 0.001964251038568692, 'learning_rate': 0.04264235636489542, 'epoch': 0.7}
+ 367/520 [22:52<09:20, 3.66s/it] {'loss': 1.2467, 'grad_norm': 0.001977468361743408, 'learning_rate': 0.04213287038201943, 'epoch': 0.71}
+ 368/520 [22:56<09:15, 3.65s/it] {'loss': 1.1075, 'grad_norm': 0.002194152100513908, 'learning_rate': 0.04162563277652104, 'epoch': 0.71}
+ 369/520 [23:00<09:10, 3.65s/it] {'loss': 1.2637, 'grad_norm': 0.0019689756073597844, 'learning_rate': 0.04112066325667954, 'epoch': 0.71}
+ 370/520 [23:03<09:08, 3.65s/it] {'loss': 1.1627, 'grad_norm': 0.0018332828916767694, 'learning_rate': 0.04061798144264986, 'epoch': 0.71}
+ 371/520 [23:07<09:04, 3.66s/it] {'loss': 1.1645, 'grad_norm': 0.0020419826994162492, 'learning_rate': 0.04011760686569998, 'epoch': 0.71}
+ 372/520 [23:11<09:00, 3.65s/it] {'loss': 1.3524, 'grad_norm': 0.0018834751051029935, 'learning_rate': 0.03961955896745224, 'epoch': 0.72}
+ 373/520 [23:14<08:57, 3.65s/it] {'loss': 1.2249, 'grad_norm': 0.002118069894765489, 'learning_rate': 0.03912385709912794, 'epoch': 0.72}
+ 374/520 [23:18<08:55, 3.67s/it] {'loss': 1.2476, 'grad_norm': 0.0019605686173085053, 'learning_rate': 0.038630520520795276, 'epoch': 0.72}
+ 375/520 [23:22<09:01, 3.74s/it] {'loss': 1.1605, 'grad_norm': 0.002042139420348697, 'learning_rate': 0.03813956840062119, 'epoch': 0.72}
+ 376/520 [23:26<09:03, 3.77s/it] {'loss': 1.2748, 'grad_norm': 0.0017557691132633019, 'learning_rate': 0.037651019814126656, 'epoch': 0.72}
+ 377/520 [23:30<09:01, 3.79s/it] {'loss': 1.22, 'grad_norm': 0.0024950957943429327, 'learning_rate': 0.037164893743445275, 'epoch': 0.72}
+ 378/520 [23:34<09:01, 3.82s/it] {'loss': 1.2711, 'grad_norm': 0.0019633989406312893, 'learning_rate': 0.03668120907658603, 'epoch': 0.73}
+ 379/520 [23:37<08:59, 3.83s/it] {'loss': 1.2487, 'grad_norm': 0.0018664820719861838, 'learning_rate': 0.036199984606699154, 'epoch': 0.73}
+ 380/520 [23:41<08:56, 3.83s/it] {'loss': 1.3348, 'grad_norm': 0.002489539112741632, 'learning_rate': 0.035721239031346066, 'epoch': 0.73}
+ 381/520 [23:45<08:55, 3.85s/it] {'loss': 1.2461, 'grad_norm': 0.0019142819263809892, 'learning_rate': 0.03524499095177297, 'epoch': 0.73}
+ 382/520 [23:49<08:53, 3.86s/it] {'loss': 1.2829, 'grad_norm': 0.002169132479641066, 'learning_rate': 0.03477125887218792, 'epoch': 0.73}
+ 383/520 [23:53<08:48, 3.86s/it] {'loss': 1.0846, 'grad_norm': 0.0021357705490413987, 'learning_rate': 0.03430006119904196, 'epoch': 0.74}
+ 384/520 [23:57<08:44, 3.86s/it] {'loss': 1.356, 'grad_norm': 0.002049125262621935, 'learning_rate': 0.033831416240314084, 'epoch': 0.74}
+ 385/520 [24:01<08:40, 3.85s/it] {'loss': 1.2275, 'grad_norm': 0.0018433104039369142, 'learning_rate': 0.03336534220479961, 'epoch': 0.74}
+ 386/520 [24:04<08:36, 3.86s/it] {'loss': 1.1754, 'grad_norm': 0.0016950962736183668, 'learning_rate': 0.032901857201403005, 'epoch': 0.74}
+ 387/520 [24:08<08:33, 3.86s/it] {'loss': 1.3474, 'grad_norm': 0.0019100898729690183, 'learning_rate': 0.032440979238433976, 'epoch': 0.74}
+ 388/520 [24:12<08:30, 3.87s/it] {'loss': 1.126, 'grad_norm': 0.0018196465966137266, 'learning_rate': 0.03198272622290804, 'epoch': 0.75}
+ 389/520 [24:16<08:27, 3.88s/it] {'loss': 1.1821, 'grad_norm': 0.002244534421512518, 'learning_rate': 0.03152711595985065, 'epoch': 0.75}
+ 390/520 [24:20<08:24, 3.88s/it] {'loss': 1.2416, 'grad_norm': 0.0019017800945688589, 'learning_rate': 0.031074166151605298, 'epoch': 0.75}
+ 391/520 [24:24<08:21, 3.89s/it] {'loss': 1.3231, 'grad_norm': 0.0020639497724230355, 'learning_rate': 0.030623894397145836, 'epoch': 0.75}
+ 392/520 [24:28<08:18, 3.89s/it] {'loss': 1.1298, 'grad_norm': 0.001882800706462555, 'learning_rate': 0.03017631819139273, 'epoch': 0.75}
+ 393/520 [24:32<08:11, 3.87s/it] {'loss': 1.1715, 'grad_norm': 0.0018816728636615412, 'learning_rate': 0.029731454924533086, 'epoch': 0.76}
+ 394/520 [24:35<08:03, 3.84s/it] {'loss': 1.1929, 'grad_norm': 0.002073895195871532, 'learning_rate': 0.029289321881345254, 'epoch': 0.76}
+ 395/520 [24:39<07:52, 3.78s/it] {'loss': 1.1582, 'grad_norm': 0.002214287020075431, 'learning_rate': 0.028849936240527008, 'epoch': 0.76}
+ 396/520 [24:43<07:42, 3.73s/it] {'loss': 1.2424, 'grad_norm': 0.0020494130698494547, 'learning_rate': 0.028413315074028157, 'epoch': 0.76}
+ 397/520 [24:46<07:36, 3.71s/it] {'loss': 1.228, 'grad_norm': 0.0018447569553099617, 'learning_rate': 0.027979475346387363, 'epoch': 0.76}
+ 398/520 [24:50<07:30, 3.70s/it] {'loss': 1.225, 'grad_norm': 0.0020761141681116856, 'learning_rate': 0.027548433914072735, 'epoch': 0.77}
+ 399/520 [24:54<07:25, 3.68s/it] {'loss': 1.2131, 'grad_norm': 0.0019407265943183572, 'learning_rate': 0.027120207524827168, 'epoch': 0.77}
+ 400/520 [24:57<07:21, 3.68s/it] {'loss': 1.2579, 'grad_norm': 0.001862571975311656, 'learning_rate': 0.02669481281701739, 'epoch': 0.77}
+ 401/520 [25:01<07:15, 3.66s/it] {'loss': 1.0556, 'grad_norm': 0.002174388513300301, 'learning_rate': 0.026272266318987603, 'epoch': 0.77}
+ 402/520 [25:05<07:12, 3.67s/it] {'loss': 1.174, 'grad_norm': 0.002155733046444545, 'learning_rate': 0.02585258444841733, 'epoch': 0.77}
+ 403/520 [25:08<07:08, 3.66s/it] {'loss': 1.2037, 'grad_norm': 0.0022100634826858053, 'learning_rate': 0.025435783511683442, 'epoch': 0.78}
+ 404/520 [25:12<07:02, 3.64s/it] {'loss': 1.1162, 'grad_norm': 0.0024140587492088015, 'learning_rate': 0.02502187970322657, 'epoch': 0.78}
+ 405/520 [25:15<06:59, 3.65s/it] {'loss': 1.2126, 'grad_norm': 0.001899331918574042, 'learning_rate': 0.02461088910492202, 'epoch': 0.78}
+ 406/520 [25:19<06:56, 3.65s/it] {'loss': 1.1444, 'grad_norm': 0.0022589302166035213, 'learning_rate': 0.02420282768545469, 'epoch': 0.78}
+ 407/520 [25:23<06:54, 3.66s/it] {'loss': 1.2961, 'grad_norm': 0.0020175202058105427, 'learning_rate': 0.02379771129969892, 'epoch': 0.78}
+ 408/520 [25:27<06:56, 3.72s/it] {'loss': 1.1868, 'grad_norm': 0.002055231458466724, 'learning_rate': 0.023395555688102213, 'epoch': 0.78}
+ 409/520 [25:31<06:57, 3.76s/it] {'loss': 1.3136, 'grad_norm': 0.002171053221355395, 'learning_rate': 0.02299637647607372, 'epoch': 0.79}
+ 410/520 [25:34<06:56, 3.79s/it] {'loss': 1.0383, 'grad_norm': 0.0019416694794212597, 'learning_rate': 0.022600189173377264, 'epoch': 0.79}
+ 411/520 [25:38<06:55, 3.81s/it] {'loss': 1.2891, 'grad_norm': 0.0023242051649349704, 'learning_rate': 0.022207009173528525, 'epoch': 0.79}
+ 412/520 [25:42<06:52, 3.82s/it] {'loss': 1.2061, 'grad_norm': 0.002114056620601696, 'learning_rate': 0.02181685175319702, 'epoch': 0.79}
+ 413/520 [25:46<06:50, 3.84s/it] {'loss': 1.2412, 'grad_norm': 0.0019728702363763827, 'learning_rate': 0.021429732071612653, 'epoch': 0.79}
+ 414/520 [25:50<06:48, 3.85s/it] {'loss': 1.0412, 'grad_norm': 0.0017057708794056359, 'learning_rate': 0.02104566516997647, 'epoch': 0.8}
+ 415/520 [25:54<06:45, 3.86s/it] {'loss': 1.1774, 'grad_norm': 0.0019174255521188616, 'learning_rate': 0.020664665970876496, 'epoch': 0.8}
+ 416/520 [25:58<06:43, 3.88s/it] {'loss': 1.1007, 'grad_norm': 0.002364532483313785, 'learning_rate': 0.020286749277707784, 'epoch': 0.8}
+ 417/520 [26:01<06:32, 3.81s/it] {'loss': 1.262, 'grad_norm': 0.0022138836849446195, 'learning_rate': 0.019911929774097215, 'epoch': 0.8}
+ 418/520 [26:05<06:24, 3.77s/it] {'loss': 1.2411, 'grad_norm': 0.0018656064955470683, 'learning_rate': 0.019540222023333165, 'epoch': 0.8}
+ 419/520 [26:09<06:17, 3.74s/it] {'loss': 1.2325, 'grad_norm': 0.0020813842603572396, 'learning_rate': 0.01917164046779948, 'epoch': 0.81}
+ 420/520 [26:12<06:11, 3.71s/it] {'loss': 1.1221, 'grad_norm': 0.0020996383867334547, 'learning_rate': 0.018806199428414352, 'epoch': 0.81}
+ 421/520 [26:16<06:06, 3.70s/it] {'loss': 1.0548, 'grad_norm': 0.0021145080878644923, 'learning_rate': 0.018443913104073985, 'epoch': 0.81}
+ 422/520 [26:20<06:01, 3.69s/it] {'loss': 1.177, 'grad_norm': 0.002025145771484452, 'learning_rate': 0.01808479557110081, 'epoch': 0.81}
+ 423/520 [26:23<05:56, 3.68s/it] {'loss': 1.162, 'grad_norm': 0.002313015848969406, 'learning_rate': 0.017728860782696667, 'epoch': 0.81}
+ 424/520 [26:27<05:53, 3.68s/it] {'loss': 1.3266, 'grad_norm': 0.002069221721158885, 'learning_rate': 0.017376122568400532, 'epoch': 0.82}
+ 425/520 [26:31<05:48, 3.67s/it] {'loss': 1.1702, 'grad_norm': 0.0018997112288170984, 'learning_rate': 0.017026594633551252, 'epoch': 0.82}
+ 426/520 [26:34<05:44, 3.66s/it] {'loss': 1.1975, 'grad_norm': 0.002642734590254597, 'learning_rate': 0.01668029055875512, 'epoch': 0.82}
+ 427/520 [26:38<05:40, 3.66s/it] {'loss': 1.1031, 'grad_norm': 0.0018723211563560116, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 428/520 [26:42<05:35, 3.65s/it] {'loss': 1.0872, 'grad_norm': 0.0020377219739055926, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 429/520 [26:45<05:32, 3.66s/it] {'loss': 1.1826, 'grad_norm': 0.001952394390178331, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 430/520 [26:49<05:30, 3.67s/it] {'loss': 1.1806, 'grad_norm': 0.0017759945989252008, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 431/520 [26:53<05:26, 3.66s/it] {'loss': 1.2152, 'grad_norm': 0.002143744030303925, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 432/520 [26:56<05:22, 3.66s/it] {'loss': 1.0912, 'grad_norm': 0.0020728056641033427, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 433/520 [27:00<05:17, 3.65s/it] {'loss': 1.2252, 'grad_norm': 0.0019208812976340137, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 434/520 [27:03<05:13, 3.65s/it] {'loss': 0.9628, 'grad_norm': 0.0018912867080262442, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 435/520 [27:07<05:09, 3.64s/it] {'loss': 1.2606, 'grad_norm': 0.002339454359279303, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 436/520 [27:11<05:05, 3.64s/it] {'loss': 1.0535, 'grad_norm': 0.0018892936422914425, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 437/520 [27:14<05:02, 3.64s/it] {'loss': 1.2871, 'grad_norm': 0.001971700043364709, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 438/520 [27:18<04:58, 3.64s/it] {'loss': 1.0974, 'grad_norm': 0.001991560613096598, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 439/520 [27:22<04:57, 3.67s/it] {'loss': 1.1856, 'grad_norm': 0.0017586964242433533, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 440/520 [27:26<04:57, 3.72s/it] {'loss': 1.1436, 'grad_norm': 0.0019414012002701355, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 441/520 [27:29<04:56, 3.75s/it] {'loss': 1.2057, 'grad_norm': 0.0019112139200554055, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 442/520 [27:33<04:54, 3.78s/it] {'loss': 1.1968, 'grad_norm': 0.0022239386710900433, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 443/520 [27:37<04:51, 3.78s/it] {'loss': 1.2158, 'grad_norm': 0.0020032741876059405, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 444/520 [27:41<04:47, 3.79s/it] {'loss': 1.1779, 'grad_norm': 0.0017299080307258635, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 445/520 [27:45<04:43, 3.78s/it] {'loss': 1.1053, 'grad_norm': 0.0019027977465268216, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 446/520 [27:48<04:40, 3.79s/it] {'loss': 1.2846, 'grad_norm': 0.0018595213735830877, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 447/520 [27:52<04:35, 3.78s/it] {'loss': 1.1941, 'grad_norm': 0.0019378940563324189, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 448/520 [27:56<04:31, 3.78s/it] {'loss': 1.1722, 'grad_norm': 0.0020278667197496736, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 449/520 [28:00<04:28, 3.78s/it] {'loss': 1.2329, 'grad_norm': 0.0019923606077005282, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 450/520 [28:04<04:24, 3.78s/it] {'loss': 1.2094, 'grad_norm': 0.0020125792605913792, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 451/520 [28:07<04:20, 3.77s/it] {'loss': 1.2048, 'grad_norm': 0.0019947753781494612, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 452/520 [28:11<04:15, 3.76s/it] {'loss': 1.2709, 'grad_norm': 0.0019125785963838757, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 453/520 [28:15<04:10, 3.74s/it] {'loss': 1.2633, 'grad_norm': 0.001997369811296012, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 454/520 [28:18<04:06, 3.73s/it] {'loss': 1.1183, 'grad_norm': 0.002129099885365621, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 455/520 [28:22<04:00, 3.70s/it] {'loss': 1.2525, 'grad_norm': 0.0019593744084918626, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 456/520 [28:26<03:56, 3.69s/it] {'loss': 1.1702, 'grad_norm': 0.0020216380365901017, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 457/520 [28:29<03:51, 3.67s/it] {'loss': 1.1891, 'grad_norm': 0.0018440552885214518, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 458/520 [28:33<03:46, 3.65s/it] {'loss': 1.3111, 'grad_norm': 0.0021041017765174364, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 459/520 [28:37<03:42, 3.65s/it] {'loss': 1.25, 'grad_norm': 0.002046546112081236, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 460/520 [28:40<03:38, 3.64s/it] {'loss': 1.1205, 'grad_norm': 0.001933903981737447, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 461/520 [28:44<03:35, 3.65s/it] {'loss': 1.2745, 'grad_norm': 0.0016600565955704088, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 462/520 [28:48<03:31, 3.65s/it] {'loss': 1.3241, 'grad_norm': 0.0019512096401631894, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 463/520 [28:51<03:27, 3.65s/it] {'loss': 1.0785, 'grad_norm': 0.0020454941180490763, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 464/520 [28:55<03:24, 3.65s/it] {'loss': 1.2271, 'grad_norm': 0.0020662146673877833, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 465/520 [28:58<03:20, 3.65s/it] {'loss': 1.3393, 'grad_norm': 0.0021839663328900023, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 466/520 [29:02<03:16, 3.64s/it] {'loss': 1.2095, 'grad_norm': 0.0017681131623406593, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 467/520 [29:06<03:13, 3.66s/it] {'loss': 1.2043, 'grad_norm': 0.0018297806680165885, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 468/520 [29:09<03:09, 3.65s/it] {'loss': 1.193, 'grad_norm': 0.002236910334726393, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 469/520 [29:13<03:05, 3.64s/it] {'loss': 1.2416, 'grad_norm': 0.0021371443323498396, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 470/520 [29:17<03:02, 3.64s/it] {'loss': 1.1214, 'grad_norm': 0.0017792451120569343, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 471/520 [29:20<02:58, 3.64s/it] {'loss': 1.1419, 'grad_norm': 0.0020295335849561903, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 472/520 [29:24<02:55, 3.65s/it] {'loss': 1.1147, 'grad_norm': 0.002002071398396733, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 473/520 [29:28<02:51, 3.66s/it] {'loss': 1.1725, 'grad_norm': 0.002003018038987398, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 474/520 [29:31<02:48, 3.65s/it] {'loss': 1.2426, 'grad_norm': 0.0018356908415292277, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 475/520 [29:35<02:44, 3.66s/it] {'loss': 1.1592, 'grad_norm': 0.0018249713207000395, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 476/520 [29:39<02:40, 3.66s/it] {'loss': 1.172, 'grad_norm': 0.0019817115828941463, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 477/520 [29:42<02:36, 3.65s/it] {'loss': 1.1517, 'grad_norm': 0.002138492951282436, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 478/520 [29:46<02:33, 3.66s/it] {'loss': 1.1129, 'grad_norm': 0.0019329163751197877, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 479/520 [29:50<02:31, 3.69s/it] {'loss': 1.2167, 'grad_norm': 0.0020132981515141525, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 480/520 [29:53<02:27, 3.68s/it] {'loss': 1.2356, 'grad_norm': 0.0018992150932289728, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 481/520 [29:57<02:23, 3.69s/it] {'loss': 1.2353, 'grad_norm': 0.0018327191470484995, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 482/520 [30:01<02:20, 3.69s/it] {'loss': 1.2453, 'grad_norm': 0.0020314308757121615, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 483/520 [30:04<02:16, 3.68s/it] {'loss': 1.1832, 'grad_norm': 0.0020838229202473422, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 484/520 [30:08<02:12, 3.68s/it] {'loss': 1.1961, 'grad_norm': 0.002032355041006913, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 485/520 [30:12<02:08, 3.67s/it] {'loss': 1.1341, 'grad_norm': 0.0018997866794941386, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 486/520 [30:15<02:04, 3.67s/it] {'loss': 1.2603, 'grad_norm': 0.0020575146886498818, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 487/520 [30:19<02:00, 3.67s/it] {'loss': 1.1133, 'grad_norm': 0.0019090487858298636, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 488/520 [30:23<01:57, 3.66s/it] {'loss': 1.0566, 'grad_norm': 0.0019944050792171765, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 489/520 [30:26<01:53, 3.66s/it] {'loss': 1.2378, 'grad_norm': 0.0017031307452240586, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 490/520 [30:30<01:49, 3.66s/it] {'loss': 1.1772, 'grad_norm': 0.002054401340381226, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 491/520 [30:34<01:46, 3.66s/it] {'loss': 1.1341, 'grad_norm': 0.0020248548231170064, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 492/520 [30:37<01:42, 3.66s/it] {'loss': 1.2601, 'grad_norm': 0.0020387405735126317, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 493/520 [30:41<01:38, 3.67s/it] {'loss': 1.2733, 'grad_norm': 0.0020737027021810457, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 494/520 [30:45<01:35, 3.67s/it] {'loss': 1.1947, 'grad_norm': 0.0018007143967327193, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 495/520 [30:48<01:31, 3.66s/it] {'loss': 1.1571, 'grad_norm': 0.0019388478150027093, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 496/520 [30:52<01:27, 3.67s/it] {'loss': 1.0778, 'grad_norm': 0.0021836027411482706, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 497/520 [30:56<01:24, 3.67s/it] {'loss': 1.1747, 'grad_norm': 0.0017248786137425002, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 498/520 [30:59<01:20, 3.67s/it] {'loss': 1.1565, 'grad_norm': 0.0020460522849245605, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 499/520 [31:03<01:17, 3.67s/it] {'loss': 1.3121, 'grad_norm': 0.001995408878326669, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 500/520 [31:07<01:13, 3.67s/it] {'loss': 1.2773, 'grad_norm': 0.0022721387098277126, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 501/520 [31:10<01:09, 3.67s/it] {'loss': 1.2591, 'grad_norm': 0.003127524146106475, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 502/520 [31:14<01:06, 3.67s/it] {'loss': 1.1969, 'grad_norm': 0.0018450354883856785, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 503/520 [31:18<01:02, 3.67s/it] {'loss': 1.2074, 'grad_norm': 0.002015426244963339, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 504/520 [31:21<00:58, 3.66s/it] {'loss': 1.1984, 'grad_norm': 0.002341678445509998, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 505/520 [31:25<00:54, 3.66s/it] {'loss': 1.23, 'grad_norm': 0.0019338360076388344, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 506/520 [31:29<00:51, 3.66s/it] {'loss': 1.1482, 'grad_norm': 0.0021345251104645455, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 507/520 [31:32<00:47, 3.66s/it] {'loss': 1.3522, 'grad_norm': 0.0018598443873463538, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 508/520 [31:36<00:43, 3.66s/it] {'loss': 1.2716, 'grad_norm': 0.002058335501839074, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 509/520 [31:40<00:40, 3.66s/it] {'loss': 1.2324, 'grad_norm': 0.0019196870191348762, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 510/520 [31:43<00:36, 3.66s/it] {'loss': 1.1894, 'grad_norm': 0.0019016038901123187, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 511/520 [31:47<00:32, 3.66s/it] {'loss': 1.1639, 'grad_norm': 0.0018564100558906324, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 512/520 [31:51<00:29, 3.66s/it] {'loss': 1.0439, 'grad_norm': 0.001955279009350797, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 513/520 [31:54<00:25, 3.69s/it] {'loss': 1.247, 'grad_norm': 0.002171303055718875, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 514/520 [31:58<00:22, 3.68s/it] {'loss': 1.2197, 'grad_norm': 0.001832707840023742, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 515/520 [32:02<00:18, 3.68s/it] {'loss': 1.266, 'grad_norm': 0.002248961613386252, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 516/520 [32:06<00:14, 3.72s/it] {'loss': 1.1571, 'grad_norm': 0.0018857966362143374, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 517/520 [32:09<00:11, 3.73s/it] {'loss': 1.258, 'grad_norm': 0.002043882947055939, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 518/520 [32:13<00:07, 3.74s/it] {'loss': 1.1821, 'grad_norm': 0.002132594557094607, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 519/520 [32:17<00:03, 3.75s/it] {'loss': 1.2168, 'grad_norm': 0.001913403098005655, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 520/520 [32:22<00:00, 4.02s/it] {'loss': 1.2614, 'grad_norm': 0.002102172243645505, 'learning_rate': 0.0, 'epoch': 1.0}
+ {'train_runtime': 1942.0646, 'train_samples_per_second': 34.257, 'train_steps_per_second': 0.268, 'train_loss': 1.331216181929295, 'epoch': 1.0}
+ 520/520 [32:22<00:00, 3.73s/it]
+[2025-10-13 15:12:56,917] [INFO] [launch.py:348:main] Process 828760 exits successfully.
+[2025-10-13 15:12:56,917] [INFO] [launch.py:348:main] Process 828757 exits successfully.
+[2025-10-13 15:12:56,918] [INFO] [launch.py:348:main] Process 828759 exits successfully.
+[2025-10-13 15:12:57,918] [INFO] [launch.py:348:main] Process 828755 exits successfully.
+[2025-10-13 15:12:57,919] [INFO] [launch.py:348:main] Process 828756 exits successfully.
+[2025-10-13 15:12:57,919] [INFO] [launch.py:348:main] Process 828758 exits successfully.
+[2025-10-13 15:12:57,919] [INFO] [launch.py:348:main] Process 828754 exits successfully.
+[2025-10-13 15:13:00,923] [INFO] [launch.py:348:main] Process 828753 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.5_2e-1_connector-3.0_2.5_2e-1_ablation_20251013_143914.log
+Timestamp: 2025-10-13 15:13:03
+=====================================
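The run above decays the learning rate with a cosine schedule after linear warmup (--lr_scheduler_type cosine, --warmup_ratio 0.03, --learning_rate 2e-1, 520 optimizer steps). A minimal sketch for spot-checking the logged learning rates, assuming the schedule is Transformers' cosine-with-warmup; the 16-step warmup length is inferred from ceil(0.03 * 520) and is not printed anywhere in the log:

import math

BASE_LR = 2e-1      # --learning_rate from the launch command
TOTAL_STEPS = 520   # optimizer steps in this run
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # 16; inferred from --warmup_ratio 0.03

def cosine_lr(step: int) -> float:
    # Linear warmup, then cosine decay to zero (shape of HF get_cosine_schedule_with_warmup).
    if step < WARMUP_STEPS:
        return BASE_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return BASE_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot-check against learning rates printed in the log above:
# step 268 is the exact midpoint of the decay, step 520 is the end.
for step, logged in [(268, 0.1), (352, 0.050000000000000024), (520, 0.0)]:
    assert abs(cosine_lr(step) - logged) < 1e-12, (step, cosine_lr(step), logged)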
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation_20251013_151303.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation_20251013_151303.log
new file mode 100644
index 0000000000000000000000000000000000000000..a768d6efd836354675f5fa446d15997a08b8fcdf
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation_20251013_151303.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation_20251013_151303.log
+Timestamp: 2025-10-13 15:13:03
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 15:13:06,154] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:08,807] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 15:13:08,808] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2  --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 2.7 --temperature_mlp_text 2.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 2.7 --temperature_mlp_vision 2.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 2.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import] +[2025-10-13 15:13:11,379] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 15:13:12,433] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 15:13:12,433] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 15:13:12,433] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 15:13:12,433] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 15:13:12,433] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 15:13:12,433] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 15:13:12,433] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 15:13:12,436] [INFO] [launch.py:253:main] process 848850 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '2.7', '--temperature_mlp_text', '2.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '2.7', '--temperature_mlp_vision', '2.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '2.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 15:13:12,438] [INFO] 
[launch.py:253:main] process 848851 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', … identical to the process 848850 command above]
+[2025-10-13 15:13:12,440] [INFO] [launch.py:253:main] process 848852 spawned with command: ['…', '--local_rank=2', …]
+[2025-10-13 15:13:12,442] [INFO] [launch.py:253:main] process 848853 spawned with command: ['…', '--local_rank=3', …]
+[2025-10-13 15:13:12,445] [INFO] [launch.py:253:main] process 848854 spawned with command: ['…', '--local_rank=4', …]
+[2025-10-13 15:13:12,447] [INFO] [launch.py:253:main] process 848855 spawned with command: ['…', '--local_rank=5', …]
+[2025-10-13 15:13:12,449] [INFO] [launch.py:253:main] process 848856 spawned with command: ['…', '--local_rank=6', …]
+[2025-10-13 15:13:12,451] [INFO] [launch.py:253:main] process 848857 spawned with command: ['…', '--local_rank=7', …]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead.
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 15:13:19,019] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,210] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,254] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,254] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,257] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,265] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,276] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,277] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:13:19,431] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,615] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,656] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,657] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,657] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 15:13:19,669] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,671] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,684] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:13:19,686] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.7, 'temperature_mlp': 2.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.7, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+ "backward_type_connector": "normal",
+ "cache_dir": null,
+ "connector_type": "mlp2x_gelu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "square",
+ "image_token_index": -200,
+ "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "mask_model": [
+ "llm",
+ "connector"
+ ],
+ "mask_type_connector": "soft",
+ "model_type": "tinyllava",
+ "num_queries": 128,
+ "num_resampler_layers": 3,
+ "pad_token": null,
+ "resampler_hidden_size": 768,
+ "sparsity_connector": null,
+ "subnet_type_connector": "global",
+ "temperature_connector": 2.7,
+ "text_config": {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "backward_type": "normal",
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_size": 896,
+ "intermediate_size": 4864,
+ "mask_type": "soft",
+ "masked_layers": "all",
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "subnet_mode": "both",
+ "subnet_type": "None",
+ "temperature_attn": 2.7,
+ "temperature_mlp": 2.7,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "threshold_connector": null,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "tokenizer_padding_side": "right",
+ "tokenizer_use_fast": false,
+ "transformers_version": "4.40.1",
+ "tune_type_connector": "frozen",
+ "tune_type_llm": "frozen",
+ "tune_type_vision_tower": "frozen",
+ "tune_vision_tower_from_layer": -1,
+ "use_cache": false,
+ "vision_config": {
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 384,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_name_or_path": "google/siglip-so400m-patch14-384",
+ "model_name_or_path2": "",
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "patch",
+ "vision_hidden_size": 1152,
+ "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+ "vision_model_name_or_path2": "",
+ "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
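Both config dumps above describe soft masks ('mask_type': 'soft') with temperature 2.7 over a frozen LLM and connector ('tune_type_llm'/'tune_type_connector': 'frozen'), with mask scores initialized at 3.0 (the --init_mean_* arguments in the launch command). A minimal sketch of what such a layer could look like, assuming the common supermask recipe of a temperature-scaled sigmoid gate over frozen weights; illustrative only, not the repo's actual implementation:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Frozen weight, learnable per-weight score, soft sigmoid mask (sketch)."""
    def __init__(self, in_features, out_features, init_mean=3.0, temperature=2.7):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features), requires_grad=False)
        nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)
        # init_mean=3.0 with temperature=2.7 starts every gate near sigmoid(1.11) ~ 0.75
        self.scores = nn.Parameter(torch.full((out_features, in_features), float(init_mean)))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask)                # only the scores receive gradients
```

With 'backward_type': 'normal', the gradient would flow through the sigmoid as-is (no straight-through trick), so only the scores train at the large --learning_rate 2e-1 while the underlying weights stay frozen, consistent with the config above.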
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:848850:848850 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:848850:848850 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:848850:848850 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:848850:848850 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:848850:848850 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:848850:848850 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:848856:848856 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:848856:848856 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:848856:848856 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:848856:848856 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:848856:848856 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:848856:848856 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:848855:848855 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:848855:848855 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:848855:848855 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:848855:848855 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:848855:848855 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:848855:848855 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:848851:848851 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:848851:848851 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:848851:848851 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:848851:848851 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:848851:848851 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:848851:848851 [1] NCCL INFO NET/Plugin: Using internal network plugin.
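The NCCL INFO lines here and below are the eight ranks bootstrapping a single-node communicator over eth0. For reference, a minimal sketch that exercises the same path outside DeepSpeed (hypothetical file name smoke.py; assumes a launcher such as torchrun sets RANK/WORLD_SIZE/LOCAL_RANK, and that NCCL_DEBUG=INFO is exported to get this verbosity):

```python
import os
import torch
import torch.distributed as dist

def main():
    local_rank = int(os.environ["LOCAL_RANK"])  # provided by the launcher
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")     # rendezvous over MASTER_ADDR/MASTER_PORT
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)                          # first collective builds the rings/trees logged below
    print(dist.get_rank(), x.item())            # with 8 ranks, every rank prints 8.0

if __name__ == "__main__":
    main()
```

Something like NCCL_DEBUG=INFO NCCL_SOCKET_IFNAME=eth torchrun --nproc_per_node=8 smoke.py should reproduce comparable bootstrap output on similar hardware.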
+ywang29-vrdb-test1-worker-0:848852:848852 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:848852:848852 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848852:848852 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848852:848852 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:848852:848852 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:848852:848852 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:848857:848857 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:848857:848857 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848857:848857 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848857:848857 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:848857:848857 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:848857:848857 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:848853:848853 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:848853:848853 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848853:848853 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848854:848854 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:848854:848854 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848854:848854 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848853:848853 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:848853:848853 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:848853:848853 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:848854:848854 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:848854:848854 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:848854:848854 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO NET/IB : No device found. 
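Worth noting in the lines above: every rank reports 'NET/IB : No device found' and falls back to 'NET/Socket : Using [0]eth0'. On this single-node job the socket transport only carries bootstrap and any inter-node traffic; the collectives themselves run over intra-node P2P (the 'via P2P/CUMEM/read' channels further down), so the absence of InfiniBand should not cost performance until a run spans multiple nodes.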
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO ncclCommInitRank comm 0x557608fa21c0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO ncclCommInitRank comm 0x55f7bf875f40 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO ncclCommInitRank comm 0x56344bf02d00 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO ncclCommInitRank comm 0x55ba64e9d600 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO ncclCommInitRank comm 0x55f2802063c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO ncclCommInitRank comm 0x55ffdd6b3200 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO ncclCommInitRank comm 0x55c75c55e230 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO ncclCommInitRank comm 0x561840fe4690 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x1830035571e92383 - Init START +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO 
NVLS multicast support is not available on dev 2
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO comm 0x557608fa21c0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO comm 0x55f2802063c0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO comm 0x55ba64e9d600 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO comm 0x55ffdd6b3200 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO comm 0x56344bf02d00 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO comm 0x561840fe4690 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO comm 0x55c75c55e230 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO comm 0x55f7bf875f40 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848854:850440 [4] NCCL INFO ncclCommInitRank comm 0x55f7bf875f40 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x1830035571e92383 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848857:850438 [7] NCCL INFO ncclCommInitRank comm 0x55ffdd6b3200 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x1830035571e92383 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848855:850435 [5] NCCL INFO ncclCommInitRank comm 0x55ba64e9d600 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x1830035571e92383 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:848856:850434 [6] NCCL INFO ncclCommInitRank comm 0x561840fe4690 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x1830035571e92383 - Init COMPLETE +ywang29-vrdb-test1-worker-0:848853:850439 [3] NCCL INFO ncclCommInitRank comm 0x557608fa21c0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x1830035571e92383 - Init COMPLETE +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:848852:850437 [2] NCCL INFO ncclCommInitRank comm 0x55f2802063c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x1830035571e92383 - Init COMPLETE +ywang29-vrdb-test1-worker-0:848851:850436 [1] NCCL INFO ncclCommInitRank comm 0x55c75c55e230 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x1830035571e92383 - Init COMPLETE +ywang29-vrdb-test1-worker-0:848850:850433 [0] NCCL INFO ncclCommInitRank comm 0x56344bf02d00 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x1830035571e92383 - Init COMPLETE +[2025-10-13 15:14:04,354] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 
'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 
'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 15:14:06,118] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
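The `*.scores` tensors flagged in the warning above are the learnable mask logits that the mask-tuning wrapper attaches to every attention and MLP projection. They do not exist in the pretrained base checkpoint, so Transformers necessarily reports them as newly initialized; in this setting the warning is expected rather than a sign of a corrupted checkpoint. A minimal sketch of why the warning fires, using a hypothetical MaskedLinear stand-in rather than the project's SupermaskLinearSparsity_SoftForward_Normal:

    import torch
    import torch.nn as nn

    class MaskedLinear(nn.Linear):
        """Hypothetical stand-in for a masked projection layer."""
        def __init__(self, in_features, out_features, bias=True):
            super().__init__(in_features, out_features, bias=bias)
            # Extra per-weight mask logits; absent from any checkpoint
            # saved by a plain nn.Linear.
            self.scores = nn.Parameter(torch.zeros_like(self.weight))

    layer = MaskedLinear(896, 896)
    plain_ckpt = nn.Linear(896, 896).state_dict()  # has no 'scores' entry
    missing, unexpected = layer.load_state_dict(plain_ckpt, strict=False)
    print(missing)     # ['scores'] -- exactly what from_pretrained reports
    print(unexpected)  # []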
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
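The SupermaskLinearSparsity_SoftForward_Normal modules in this dump point at a soft supermask: each projection keeps its dense weights and multiplies them elementwise by a temperature-scaled sigmoid of the mask logits. The sketch below is an assumption about that behaviour, not the project's actual implementation; with the run's temperature of 1.3 and the constant score init of 3.0 reported in the lines that follow, the initial gate is sigmoid(3.0 / 1.3) ~ 0.91, so the mask starts close to identity.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftSupermaskLinear(nn.Linear):
        """Hypothetical soft-forward supermask layer (assumed behaviour)."""
        def __init__(self, in_features, out_features, bias=True,
                     temperature=1.3, score_init=3.0):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # Constant init matches the "Mean=3.000000" report below.
            self.scores = nn.Parameter(torch.full_like(self.weight, score_init))

        def forward(self, x):
            # Soft gate in (0, 1); higher scores keep a weight, lower prune it.
            gate = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * gate, self.bias)

    # Reproducing the per-parameter init report printed below:
    layer = SoftSupermaskLinear(896, 896)
    for name, p in layer.named_parameters():
        if name.endswith("scores"):
            print(f"Pre-training init {name}: Mean={p.mean():.6f}")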
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores:
Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 15:14:24,412 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 15:14:24,418 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO P2P Chunksize set to 524288
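Everything the trainer marks trainable above is a supermask `scores` tensor: one score per weight element (biases excluded, which is why q_proj reports exactly 896 x 896 = 802816), apparently initialized at or very near the constant 3.0 logged during "Pre-training init". The totals are internally consistent with the TinyLlavaConfig printed at the start of this log; a minimal standalone check (editorial Python sketch re-deriving the numbers from the config shapes, not TinyLLaVA code):

```python
# Sketch: re-derive the trainable-score totals reported above from the
# TinyLlavaConfig shapes (hidden 896, intermediate 4864, 24 layers,
# 2 KV heads -> k/v out_features 128, SigLIP hidden 1152).
hidden, inter, kv_out, n_layers, vision = 896, 4864, 128, 24, 1152

per_layer = (
    2 * hidden * hidden      # q_proj + o_proj scores: 802816 each
    + 2 * hidden * kv_out    # k_proj + v_proj scores: 114688 each
    + 3 * hidden * inter     # gate/up/down_proj scores: 4358144 each
)
connector = vision * hidden + hidden * hidden  # mlp2x_gelu: 1032192 + 802816

assert n_layers * per_layer + connector == 359_661_568  # "Total Trainable Parameters"
assert 665_298 // 10 == 66_529                          # the 10.0% mask-tuning subsample
```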
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
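The NCCL setup above describes a single node with eight GPUs: all 24 channels use the same ring order 0 1 2 3 4 5 6 7 (the "7[7] -> 0[0]" edges close the ring), and each "Trees [c] a/b/c->r->p" entry reads as children a/b/c -> rank r -> parent p, i.e. a chain-shaped tree rooted at rank 0. A tiny sketch re-deriving the peer pattern printed above (illustrative editorial Python, not NCCL code):

```python
# Re-derive the intra-node peers implied by the NCCL lines above:
# every channel's ring neighbour, plus the chain tree rooted at rank 0.
WORLD = 8  # ranks 0..7, one per local GPU

for rank in range(WORLD):
    ring_next = (rank + 1) % WORLD                  # rank 7 wraps around to 0
    parent = rank - 1                               # -1 marks the tree root
    child = rank + 1 if rank + 1 < WORLD else -1    # -1/-1/-1 means no children
    print(f"rank {rank}: ring -> {ring_next}, tree {child}/-1/-1->{rank}->{parent}")
```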
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:848857:855371 [7] NCCL INFO ncclCommInitRank comm 0x7f294006a770 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848855:855366 [5] NCCL INFO ncclCommInitRank comm 0x7ef90c06b7e0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848854:855365 [4] NCCL INFO ncclCommInitRank comm 0x7f54a806b450 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848853:855368 [3] NCCL INFO ncclCommInitRank comm 0x7ef99006b7d0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848852:855370 [2] NCCL INFO ncclCommInitRank comm 0x7fe88c06a850 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848850:855364 [0] NCCL INFO ncclCommInitRank comm 0x7fd87806b800 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848856:855367 [6] NCCL INFO ncclCommInitRank comm 0x7f394406be60 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:848851:855369 [1] NCCL INFO ncclCommInitRank comm 0x7ef8f406bf20 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x2ed4316b2b20399f - Init COMPLETE
+ 0%| | 1/520 [00:29<4:18:30, 29.89s/it] {'loss': 5.1721, 'grad_norm': 0.17845123407989022, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520 [00:29<4:18:30, 29.89s/it] 0%| | 2/520 [00:33<2:05:53, 14.58s/it] {'loss': 4.686, 'grad_norm': 0.16017197609695277, 'learning_rate': 0.025, 'epoch': 0.0}
+ 0%| | 2/520 [00:33<2:05:53, 14.58s/it] 1%| | 3/520 [00:37<1:23:40, 9.71s/it] {'loss': 2.6758, 'grad_norm': 0.05671537825533658, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 3/520 [00:37<1:23:40, 9.71s/it] 1%| | 4/520 [00:41<1:03:48, 7.42s/it] {'loss': 2.1502, 'grad_norm': 0.02156299536757457, 'learning_rate': 0.05, 'epoch': 0.01}
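[Note on the learning-rate column: the logged values ramp linearly from 0.0125 to the 0.2 peak over the first 16 steps, then follow a cosine decay over the remaining 504 steps; the rate passes through exactly 0.1 at step 268, the midpoint. A minimal sketch that reproduces the logged values, assuming a standard linear-warmup-plus-cosine schedule; the function name and constants below are read off this log, not from any config shown here.]

```python
import math

# Sketch of the LR schedule implied by the logged values: linear warmup
# to a 0.2 peak over 16 steps, then cosine decay to 0 over the remaining
# 504 of 520 total steps. Constants are inferred from the log above/below.
def lr_at(step: int, peak: float = 0.2, warmup: int = 16, total: int = 520) -> float:
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * peak * (1.0 + math.cos(math.pi * progress))

# Spot checks against values logged in this run:
assert abs(lr_at(1) - 0.0125) < 1e-9                 # step 1
assert abs(lr_at(17) - 0.1999980572931538) < 1e-9    # step 17
assert abs(lr_at(100) - 0.1866025403784439) < 1e-9   # step 100
assert abs(lr_at(268) - 0.1) < 1e-9                  # step 268 (midpoint)
```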
+ 1%| | 4/520 [00:41<1:03:48, 7.42s/it] 1%| | 5/520 [00:45<52:52, 6.16s/it] {'loss': 2.124, 'grad_norm': 0.027753506543031714, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 5/520 [00:45<52:52, 6.16s/it] 1%| | 6/520 [00:49<46:17, 5.40s/it] {'loss': 1.9324, 'grad_norm': 0.01879994369024986, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%| | 6/520 [00:49<46:17, 5.40s/it] 1%|▏ | 7/520 [00:53<41:56, 4.91s/it] {'loss': 1.7065, 'grad_norm': 0.014381716423588627, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:53<41:56, 4.91s/it] 2%|▏ | 8/520 [00:57<40:48, 4.78s/it] {'loss': 1.7089, 'grad_norm': 0.00799053817829028, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 8/520 [00:57<40:48, 4.78s/it] 2%|▏ | 9/520 [01:01<38:26, 4.51s/it] {'loss': 1.7473, 'grad_norm': 0.008661839494365733, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [01:01<38:26, 4.51s/it] 2%|▏ | 10/520 [01:05<36:45, 4.32s/it] {'loss': 1.525, 'grad_norm': 0.006006335399059514, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [01:05<36:45, 4.32s/it] 2%|▏ | 11/520 [01:09<35:53, 4.23s/it] {'loss': 1.6316, 'grad_norm': 0.009480671031510176, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [01:09<35:53, 4.23s/it] 2%|▏ | 12/520 [01:13<35:06, 4.15s/it] {'loss': 1.5914, 'grad_norm': 0.005717220421113989, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [01:13<35:06, 4.15s/it][2025-10-13 15:15:48,247] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
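[Note on the stage3.py warning above: it flags PyTorch allocator cache flushes under memory pressure, and the message itself suggests adding get_accelerator().empty_cache() calls so that all ranks flush at the same point in the loop. A minimal sketch of that mitigation, assuming an engine returned by deepspeed.initialize(); the loop variables and the every-50-steps interval are illustrative, not taken from this run.]

```python
from deepspeed.accelerator import get_accelerator

def train(engine, train_loader, empty_cache_every: int = 50):
    # `engine` is assumed to be a DeepSpeed engine from deepspeed.initialize();
    # the flush interval is illustrative. Flushing on every rank at the same
    # step avoids the unsynchronized cache flushes the warning describes.
    for step, batch in enumerate(train_loader, start=1):
        loss = engine(batch)      # forward pass through the wrapped model
        engine.backward(loss)     # DeepSpeed-managed backward
        engine.step()             # optimizer step + ZeRO stage-3 bookkeeping
        if step % empty_cache_every == 0:
            get_accelerator().empty_cache()  # same step on every rank
```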
+ 2%|▎ | 13/520 [01:18<36:09, 4.28s/it] {'loss': 1.5623, 'grad_norm': 0.006693769102150801, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [01:18<36:09, 4.28s/it] 3%|▎ | 14/520 [01:22<35:09, 4.17s/it] {'loss': 1.5773, 'grad_norm': 0.00477249152159131, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:22<35:09, 4.17s/it] 3%|▎ | 15/520 [01:26<34:28, 4.10s/it] {'loss': 1.6194, 'grad_norm': 0.0068750991005642826, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:26<34:28, 4.10s/it] 3%|▎ | 16/520 [01:29<33:55, 4.04s/it] {'loss': 1.5455, 'grad_norm': 0.004220816564268398, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:29<33:55, 4.04s/it] 3%|▎ | 17/520 [01:33<33:27, 3.99s/it] {'loss': 1.6287, 'grad_norm': 0.005431666763236848, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:33<33:27, 3.99s/it] 3%|▎ | 18/520 [01:37<33:08, 3.96s/it] {'loss': 1.4647, 'grad_norm': 0.005053558568149634, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:37<33:08, 3.96s/it] 4%|▎ | 19/520 [01:41<32:56, 3.95s/it] {'loss': 1.5824, 'grad_norm': 0.004427747492453831, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:41<32:56, 3.95s/it] 4%|▍ | 20/520 [01:45<32:53, 3.95s/it] {'loss': 1.463, 'grad_norm': 0.004372707252106089, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:45<32:53, 3.95s/it] 4%|▍ | 21/520 [01:49<32:55, 3.96s/it] {'loss': 1.609, 'grad_norm': 0.006097890510259927, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:49<32:55, 3.96s/it] 4%|▍ | 22/520 [01:53<32:46, 3.95s/it] {'loss': 1.5997, 'grad_norm': 0.00348818889400195, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:53<32:46, 3.95s/it] 4%|▍ | 23/520 [01:57<32:44, 3.95s/it] {'loss': 1.5373, 'grad_norm': 0.004230926504006738, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:57<32:44, 3.95s/it] 5%|▍ | 24/520 [02:01<32:35, 3.94s/it] {'loss': 1.5272, 'grad_norm': 0.004053442213362769, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 24/520 [02:01<32:35, 3.94s/it] 5%|▍ | 25/520 [02:05<32:31, 3.94s/it] {'loss': 1.5552, 'grad_norm': 0.003961113728533306, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▍ | 25/520 [02:05<32:31, 3.94s/it] 5%|▌ | 26/520 [02:09<32:23, 3.94s/it] {'loss': 1.5316, 'grad_norm': 0.0033075916454696807, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 26/520 [02:09<32:23, 3.94s/it] 5%|▌ | 27/520 [02:13<32:18, 3.93s/it] {'loss': 1.4372, 'grad_norm': 0.00371069090930796, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 27/520 [02:13<32:18, 3.93s/it] 5%|▌ | 28/520 [02:17<32:15, 3.93s/it] {'loss': 1.4292, 'grad_norm': 0.0035716268771093284, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 5%|▌ | 28/520 [02:17<32:15, 3.93s/it] 6%|▌ | 29/520 [02:21<32:07, 3.92s/it] {'loss': 1.443, 'grad_norm': 0.0032857255421198787, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 29/520 [02:21<32:07, 3.92s/it] 6%|▌ | 30/520 [02:24<32:03, 3.93s/it] {'loss': 1.6181, 'grad_norm': 0.004033833453018112, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:24<32:03, 3.93s/it] 6%|▌ | 31/520 [02:28<31:55, 3.92s/it] {'loss': 1.4362, 'grad_norm': 0.0029657592766741234, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:28<31:55, 3.92s/it] 6%|▌ | 32/520 [02:32<31:54, 3.92s/it] {'loss': 1.6028, 'grad_norm': 0.006716670968243299, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:32<31:54, 3.92s/it] 6%|▋ | 33/520 [02:36<31:55, 3.93s/it] {'loss': 1.4381, 'grad_norm': 0.0034645514848429693, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:36<31:55, 3.93s/it] 7%|▋ | 34/520 [02:40<31:50, 3.93s/it] {'loss': 1.426, 'grad_norm': 0.004635141107707217, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 34/520 [02:40<31:50, 3.93s/it] 7%|▋ | 35/520 [02:44<31:50, 3.94s/it] {'loss': 1.4512, 'grad_norm': 0.004684950317653021, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:44<31:50, 3.94s/it] 7%|▋ | 36/520 [02:48<31:40, 3.93s/it] {'loss': 1.5573, 'grad_norm': 0.0033402599813880515, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:48<31:40, 3.93s/it] 7%|▋ | 37/520 [02:52<30:59, 3.85s/it] {'loss': 1.6247, 'grad_norm': 0.006525292120254097, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:52<30:59, 3.85s/it] 7%|▋ | 38/520 [02:55<30:28, 3.79s/it] {'loss': 1.6392, 'grad_norm': 0.005394980349684339, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:55<30:28, 3.79s/it] 8%|▊ | 39/520 [02:59<29:59, 3.74s/it] {'loss': 1.4605, 'grad_norm': 0.0034025432131332372, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:59<29:59, 3.74s/it] 8%|▊ | 40/520 [03:03<29:42, 3.71s/it] {'loss': 1.5039, 'grad_norm': 0.004653822665908083, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 40/520 [03:03<29:42, 3.71s/it] 8%|▊ | 41/520 [03:06<29:31, 3.70s/it] {'loss': 1.4683, 'grad_norm': 0.003299980920829282, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 41/520 [03:06<29:31, 3.70s/it] 8%|▊ | 42/520 [03:10<29:21, 3.68s/it] {'loss': 1.5069,
'grad_norm': 0.005178569820401862, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [03:10<29:21, 3.68s/it] 8%|▊ | 43/520 [03:14<29:16, 3.68s/it] {'loss': 1.4922, 'grad_norm': 0.0053006697080324496, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [03:14<29:16, 3.68s/it] 8%|▊ | 44/520 [03:17<29:09, 3.68s/it] {'loss': 1.6392, 'grad_norm': 0.004956459151327151, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [03:17<29:09, 3.68s/it] 9%|▊ | 45/520 [03:21<29:03, 3.67s/it] {'loss': 1.5054, 'grad_norm': 0.0056942875527631765, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [03:21<29:03, 3.67s/it] 9%|▉ | 46/520 [03:25<28:59, 3.67s/it] {'loss': 1.6594, 'grad_norm': 0.0033698742573947875, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:25<28:59, 3.67s/it] 9%|▉ | 47/520 [03:28<29:00, 3.68s/it] {'loss': 1.4898, 'grad_norm': 0.0053402605385028876, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:28<29:00, 3.68s/it] 9%|▉ | 48/520 [03:32<28:54, 3.68s/it] {'loss': 1.4589, 'grad_norm': 0.003107528946801037, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:32<28:54, 3.68s/it] 9%|▉ | 49/520 [03:36<28:49, 3.67s/it] {'loss': 1.493, 'grad_norm': 0.0035785117075945567, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:36<28:49, 3.67s/it] 10%|▉ | 50/520 [03:39<28:39, 3.66s/it] {'loss': 1.4864, 'grad_norm': 0.0033351889704077223, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:39<28:39, 3.66s/it] 10%|▉ | 51/520 [03:43<28:30, 3.65s/it] {'loss': 1.409, 'grad_norm': 0.00333346382711547, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:43<28:30, 3.65s/it] 10%|█ | 52/520 [03:47<28:30, 3.66s/it] {'loss': 1.5438, 'grad_norm': 0.005320429981356209, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:47<28:30, 3.66s/it] 10%|█ | 53/520 [03:50<28:24, 3.65s/it] {'loss': 1.5409, 'grad_norm': 0.003229900598209696, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:50<28:24, 3.65s/it] 10%|█ | 54/520 [03:54<28:15, 3.64s/it] {'loss': 1.435, 'grad_norm': 0.004489592330096185, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:54<28:15, 3.64s/it] 11%|█ | 55/520 [03:58<28:26, 3.67s/it] {'loss': 1.414, 'grad_norm': 0.0036548042547652946, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:58<28:26, 3.67s/it] 11%|█ | 56/520 [04:01<28:18, 3.66s/it] {'loss': 1.5461, 'grad_norm': 0.0033346956468952704, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [04:01<28:18, 3.66s/it] 11%|█ | 57/520 [04:05<28:19, 3.67s/it] {'loss': 1.4245, 'grad_norm': 0.005927689060441621, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [04:05<28:19, 3.67s/it] 11%|█ | 58/520 [04:09<28:13, 3.67s/it] {'loss': 1.5656, 'grad_norm': 0.0030636431874226525, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [04:09<28:13, 3.67s/it] 11%|█▏ | 59/520 [04:12<28:16, 3.68s/it] {'loss': 1.5415, 'grad_norm': 0.010721668125569721, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [04:12<28:16, 3.68s/it] 12%|█▏ | 60/520 [04:16<28:14, 3.68s/it] {'loss': 1.4929, 'grad_norm': 0.005792676153985453, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [04:16<28:14, 3.68s/it] 12%|█▏ | 61/520 [04:20<28:08, 3.68s/it] {'loss': 1.5618, 'grad_norm': 0.0031723475794074242, 'learning_rate': 
0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:20<28:08, 3.68s/it] 12%|█▏ | 62/520 [04:23<27:59, 3.67s/it] {'loss': 1.4577, 'grad_norm': 0.0040776141839621435, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:23<27:59, 3.67s/it] 12%|█▏ | 63/520 [04:27<27:51, 3.66s/it] {'loss': 1.45, 'grad_norm': 0.004728100169118978, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:27<27:51, 3.66s/it] 12%|█▏ | 64/520 [04:31<27:50, 3.66s/it] {'loss': 1.4817, 'grad_norm': 0.0029033460739614518, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:31<27:50, 3.66s/it] 12%|█▎ | 65/520 [04:34<27:49, 3.67s/it] {'loss': 1.4846, 'grad_norm': 0.003483034460746047, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:34<27:49, 3.67s/it] 13%|█▎ | 66/520 [04:38<27:42, 3.66s/it] {'loss': 1.4491, 'grad_norm': 0.004961630893252559, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:38<27:42, 3.66s/it] 13%|█▎ | 67/520 [04:42<27:37, 3.66s/it] {'loss': 1.3299, 'grad_norm': 0.002893327493430705, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:42<27:37, 3.66s/it] 13%|█▎ | 68/520 [04:45<27:30, 3.65s/it] {'loss': 1.3806, 'grad_norm': 0.003051304528110649, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:45<27:30, 3.65s/it] 13%|█▎ | 69/520 [04:49<27:22, 3.64s/it] {'loss': 1.3723, 'grad_norm': 0.004662711581631975, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:49<27:22, 3.64s/it] 13%|█▎ | 70/520 [04:52<27:20, 3.64s/it] {'loss': 1.4211, 'grad_norm': 0.002995219246847932, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:52<27:20, 3.64s/it] 14%|█▎ | 71/520 [04:56<27:16, 3.64s/it] {'loss': 1.3451, 'grad_norm': 0.003286920264806665, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:56<27:16, 3.64s/it] 14%|█▍ | 72/520 [05:00<27:10, 3.64s/it] {'loss': 1.4859, 'grad_norm': 0.003860534208382301, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [05:00<27:10, 3.64s/it] 14%|█▍ | 73/520 [05:03<27:07, 3.64s/it] {'loss': 1.3174, 'grad_norm': 0.0025205050065582606, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [05:03<27:07, 3.64s/it] 14%|█▍ | 74/520 [05:07<27:03, 3.64s/it] {'loss': 1.432, 'grad_norm': 0.002681691263702631, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [05:07<27:03, 3.64s/it] 14%|█▍ | 75/520 [05:11<27:01, 3.64s/it] {'loss': 1.3361, 'grad_norm': 0.003240424574011983, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [05:11<27:01, 3.64s/it] 15%|█▍ | 76/520 [05:14<26:55, 3.64s/it] {'loss': 1.6298, 'grad_norm': 0.004310420731584121, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [05:14<26:55, 3.64s/it] 15%|█▍ | 77/520 [05:18<26:50, 3.64s/it] {'loss': 1.2665, 'grad_norm': 0.003407840314950678, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:18<26:50, 3.64s/it] 15%|█▌ | 78/520 [05:22<26:50, 3.64s/it] {'loss': 1.3923, 'grad_norm': 0.0031426987508579307, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:22<26:50, 3.64s/it] 15%|█▌ | 79/520 [05:25<27:06, 3.69s/it] {'loss': 1.3683, 'grad_norm': 0.0024258962386806446, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:25<27:06, 3.69s/it] 15%|█▌ | 80/520 [05:29<27:14, 3.71s/it] {'loss': 1.6646, 'grad_norm': 0.007957455236117354, 'learning_rate': 
0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:29<27:14, 3.71s/it] 16%|█▌ | 81/520 [05:33<27:21, 3.74s/it] {'loss': 1.5174, 'grad_norm': 0.003930048449679001, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:33<27:21, 3.74s/it] 16%|█▌ | 82/520 [05:37<27:27, 3.76s/it] {'loss': 1.444, 'grad_norm': 0.0026214884512712126, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:37<27:27, 3.76s/it] 16%|█▌ | 83/520 [05:40<27:08, 3.73s/it] {'loss': 1.4633, 'grad_norm': 0.0028395057190186367, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:40<27:08, 3.73s/it] 16%|█▌ | 84/520 [05:44<26:53, 3.70s/it] {'loss': 1.4776, 'grad_norm': 0.00403042282177855, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:44<26:53, 3.70s/it] 16%|█▋ | 85/520 [05:48<26:54, 3.71s/it] {'loss': 1.4964, 'grad_norm': 0.002620069521942355, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:48<26:54, 3.71s/it] 17%|█▋ | 86/520 [05:52<26:55, 3.72s/it] {'loss': 1.5162, 'grad_norm': 0.0033754419497235696, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:52<26:55, 3.72s/it] 17%|█▋ | 87/520 [05:55<26:47, 3.71s/it] {'loss': 1.6632, 'grad_norm': 0.010520763749746236, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:55<26:47, 3.71s/it] 17%|█▋ | 88/520 [05:59<26:38, 3.70s/it] {'loss': 1.7478, 'grad_norm': 0.006329047373290578, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:59<26:38, 3.70s/it] 17%|█▋ | 89/520 [06:02<26:25, 3.68s/it] {'loss': 1.4574, 'grad_norm': 0.002884003671019871, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [06:03<26:25, 3.68s/it] 17%|█▋ | 90/520 [06:06<26:20, 3.68s/it] {'loss': 1.3993, 'grad_norm': 0.0028628639748299774, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [06:06<26:20, 3.68s/it] 18%|█▊ | 91/520 [06:10<26:13, 3.67s/it] {'loss': 1.4659, 'grad_norm': 0.0025481145863768396, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [06:10<26:13, 3.67s/it] 18%|█▊ | 92/520 [06:13<26:06, 3.66s/it] {'loss': 1.4109, 'grad_norm': 0.0027202993986707004, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [06:13<26:06, 3.66s/it] 18%|█▊ | 93/520 [06:17<26:03, 3.66s/it] {'loss': 1.4181, 'grad_norm': 0.00285246310929249, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:17<26:03, 3.66s/it] 18%|█▊ | 94/520 [06:21<26:00, 3.66s/it] {'loss': 1.5162, 'grad_norm': 0.0031984636821322807, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:21<26:00, 3.66s/it] 18%|█▊ | 95/520 [06:24<25:54, 3.66s/it] {'loss': 1.3943, 'grad_norm': 0.0034614539779315888, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:24<25:54, 3.66s/it] 18%|█▊ | 96/520 [06:28<25:49, 3.66s/it] {'loss': 1.4145, 'grad_norm': 0.0025763101273737184, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:28<25:49, 3.66s/it] 19%|█▊ | 97/520 [06:32<25:47, 3.66s/it] {'loss': 1.3728, 'grad_norm': 0.0030440118698065273, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:32<25:47, 3.66s/it] 19%|█▉ | 98/520 [06:35<25:44, 3.66s/it] {'loss': 1.3603, 'grad_norm': 0.0022879415469615645, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:35<25:44, 3.66s/it] 19%|█▉ | 99/520 [06:39<25:40, 3.66s/it] {'loss': 1.3953, 'grad_norm': 0.0028888762433074075, 
'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:39<25:40, 3.66s/it] 19%|█▉ | 100/520 [06:43<25:36, 3.66s/it] {'loss': 1.5799, 'grad_norm': 0.003941519975495881, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:43<25:36, 3.66s/it] 19%|█▉ | 101/520 [06:46<25:30, 3.65s/it] {'loss': 1.3841, 'grad_norm': 0.002860956978945567, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:46<25:30, 3.65s/it] 20%|█▉ | 102/520 [06:50<25:26, 3.65s/it] {'loss': 1.3947, 'grad_norm': 0.0027442627886336926, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:50<25:26, 3.65s/it] 20%|█▉ | 103/520 [06:54<25:27, 3.66s/it] {'loss': 1.3178, 'grad_norm': 0.0022746052173926445, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:54<25:27, 3.66s/it] 20%|██ | 104/520 [06:57<25:23, 3.66s/it] {'loss': 1.4053, 'grad_norm': 0.0027519477492038975, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:57<25:23, 3.66s/it] 20%|██ | 105/520 [07:01<25:20, 3.66s/it] {'loss': 1.3891, 'grad_norm': 0.0022220767541722922, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [07:01<25:20, 3.66s/it] 20%|██ | 106/520 [07:05<25:15, 3.66s/it] {'loss': 1.5097, 'grad_norm': 0.002927320848996484, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [07:05<25:15, 3.66s/it] 21%|██ | 107/520 [07:08<25:14, 3.67s/it] {'loss': 1.5069, 'grad_norm': 0.0033243932138392832, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [07:08<25:14, 3.67s/it] 21%|██ | 108/520 [07:12<25:12, 3.67s/it] {'loss': 1.3444, 'grad_norm': 0.0026328847999743856, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [07:12<25:12, 3.67s/it] 21%|██ | 109/520 [07:16<25:17, 3.69s/it] {'loss': 1.4779, 'grad_norm': 0.0030266218221703563, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:16<25:17, 3.69s/it] 21%|██ | 110/520 [07:19<25:06, 3.67s/it] {'loss': 1.5385, 'grad_norm': 0.0024521213997334063, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:19<25:06, 3.67s/it] 21%|██▏ | 111/520 [07:23<25:00, 3.67s/it] {'loss': 1.5493, 'grad_norm': 0.002760251996154162, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:23<25:00, 3.67s/it] 22%|██▏ | 112/520 [07:27<24:54, 3.66s/it] {'loss': 1.4246, 'grad_norm': 0.002472479159448602, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:27<24:54, 3.66s/it] 22%|██▏ | 113/520 [07:30<24:47, 3.66s/it] {'loss': 1.2911, 'grad_norm': 0.0022006702373300693, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:30<24:47, 3.66s/it] 22%|██▏ | 114/520 [07:34<24:47, 3.66s/it] {'loss': 1.397, 'grad_norm': 0.0023956650564479457, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:34<24:47, 3.66s/it] 22%|██▏ | 115/520 [07:38<24:43, 3.66s/it] {'loss': 1.5077, 'grad_norm': 0.0022887371903798563, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:38<24:43, 3.66s/it] 22%|██▏ | 116/520 [07:41<24:38, 3.66s/it] {'loss': 1.5071, 'grad_norm': 0.0021135098075518496, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:41<24:38, 3.66s/it] 22%|██▎ | 117/520 [07:45<24:35, 3.66s/it] {'loss': 1.4889, 'grad_norm': 0.002726961072235173, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:45<24:35, 3.66s/it] 23%|██▎ | 118/520 [07:49<24:29, 3.66s/it] 
{'loss': 1.3699, 'grad_norm': 0.002086405525488126, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:49<24:29, 3.66s/it] 23%|██▎ | 119/520 [07:52<24:27, 3.66s/it] {'loss': 1.318, 'grad_norm': 0.0022720779838682763, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:52<24:27, 3.66s/it] 23%|██▎ | 120/520 [07:56<24:25, 3.66s/it] {'loss': 1.3587, 'grad_norm': 0.0029026655587084016, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:56<24:25, 3.66s/it] 23%|██▎ | 121/520 [08:00<24:17, 3.65s/it] {'loss': 1.414, 'grad_norm': 0.002759390902954389, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [08:00<24:17, 3.65s/it] 23%|██▎ | 122/520 [08:03<24:13, 3.65s/it] {'loss': 1.3065, 'grad_norm': 0.0022421627901792517, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [08:03<24:13, 3.65s/it] 24%|██▎ | 123/520 [08:07<24:11, 3.66s/it] {'loss': 1.5388, 'grad_norm': 0.0030965993684989946, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [08:07<24:11, 3.66s/it] 24%|██▍ | 124/520 [08:11<24:05, 3.65s/it] {'loss': 1.3857, 'grad_norm': 0.0026370921293616414, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [08:11<24:05, 3.65s/it] 24%|██▍ | 125/520 [08:14<24:08, 3.67s/it] {'loss': 1.3805, 'grad_norm': 0.002576863017828836, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:14<24:08, 3.67s/it] 24%|██▍ | 126/520 [08:19<25:29, 3.88s/it] {'loss': 1.433, 'grad_norm': 0.002179516331248954, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:19<25:29, 3.88s/it] 24%|██▍ | 127/520 [08:22<25:01, 3.82s/it] {'loss': 1.3419, 'grad_norm': 0.0028374845435622477, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:22<25:01, 3.82s/it] 25%|██▍ | 128/520 [08:26<24:38, 3.77s/it] {'loss': 1.4121, 'grad_norm': 0.002505784613395077, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:26<24:38, 3.77s/it] 25%|██▍ | 129/520 [08:30<24:19, 3.73s/it] {'loss': 1.3234, 'grad_norm': 0.0020957360544601373, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:30<24:19, 3.73s/it] 25%|██▌ | 130/520 [08:33<24:10, 3.72s/it] {'loss': 1.3856, 'grad_norm': 0.002357594218414864, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:33<24:10, 3.72s/it] 25%|██▌ | 131/520 [08:37<23:57, 3.69s/it] {'loss': 1.3996, 'grad_norm': 0.002526509034308905, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:37<23:57, 3.69s/it] 25%|██▌ | 132/520 [08:41<23:54, 3.70s/it] {'loss': 1.4343, 'grad_norm': 0.0024258436151634844, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:41<23:54, 3.70s/it] 26%|██▌ | 133/520 [08:44<23:42, 3.68s/it] {'loss': 1.3329, 'grad_norm': 0.002436883253404905, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:44<23:42, 3.68s/it] 26%|██▌ | 134/520 [08:48<23:36, 3.67s/it] {'loss': 1.4282, 'grad_norm': 0.002708298663676215, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:48<23:36, 3.67s/it] 26%|██▌ | 135/520 [08:52<23:31, 3.67s/it] {'loss': 1.4939, 'grad_norm': 0.0023726221852572453, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:52<23:31, 3.67s/it] 26%|██▌ | 136/520 [08:55<23:27, 3.67s/it] {'loss': 1.4117, 'grad_norm': 0.0025100862144261963, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 
136/520 [08:55<23:27, 3.67s/it] 26%|██▋ | 137/520 [08:59<23:22, 3.66s/it] {'loss': 1.3408, 'grad_norm': 0.002690092012788814, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:59<23:22, 3.66s/it] 27%|██▋ | 138/520 [09:03<23:17, 3.66s/it] {'loss': 1.3384, 'grad_norm': 0.0021293261943930784, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [09:03<23:17, 3.66s/it] 27%|██▋ | 139/520 [09:06<23:22, 3.68s/it] {'loss': 1.3006, 'grad_norm': 0.0027500491101891877, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [09:06<23:22, 3.68s/it] 27%|██▋ | 140/520 [09:10<23:14, 3.67s/it] {'loss': 1.4353, 'grad_norm': 0.002606250962648028, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:10<23:14, 3.67s/it] 27%|██▋ | 141/520 [09:14<23:09, 3.67s/it] {'loss': 1.4578, 'grad_norm': 0.0021716024868247722, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:14<23:09, 3.67s/it] 27%|██▋ | 142/520 [09:17<23:04, 3.66s/it] {'loss': 1.4839, 'grad_norm': 0.0022262847064041078, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:17<23:04, 3.66s/it] 28%|██▊ | 143/520 [09:21<22:58, 3.66s/it] {'loss': 1.3728, 'grad_norm': 0.002551045747016629, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:21<22:58, 3.66s/it] 28%|██▊ | 144/520 [09:25<22:51, 3.65s/it] {'loss': 1.3211, 'grad_norm': 0.0023586397518915786, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:25<22:51, 3.65s/it] 28%|██▊ | 145/520 [09:28<22:50, 3.65s/it] {'loss': 1.2636, 'grad_norm': 0.0021056791815857647, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:28<22:50, 3.65s/it] 28%|██▊ | 146/520 [09:32<22:47, 3.66s/it] {'loss': 1.51, 'grad_norm': 0.002300228259714958, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:32<22:47, 3.66s/it] 28%|██▊ | 147/520 [09:36<22:41, 3.65s/it] {'loss': 1.3005, 'grad_norm': 0.002239397483404351, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:36<22:41, 3.65s/it] 28%|██▊ | 148/520 [09:39<22:43, 3.67s/it] {'loss': 1.3343, 'grad_norm': 0.0021996067167534437, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:39<22:43, 3.67s/it] 29%|██▊ | 149/520 [09:43<22:44, 3.68s/it] {'loss': 1.2854, 'grad_norm': 0.002324389491321808, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:43<22:44, 3.68s/it] 29%|██▉ | 150/520 [09:47<22:38, 3.67s/it] {'loss': 1.5235, 'grad_norm': 0.0024322377052448784, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:47<22:38, 3.67s/it] 29%|██▉ | 151/520 [09:50<22:29, 3.66s/it] {'loss': 1.3221, 'grad_norm': 0.002149899942193201, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:50<22:29, 3.66s/it] 29%|██▉ | 152/520 [09:54<22:25, 3.66s/it] {'loss': 1.2957, 'grad_norm': 0.0022953703066304005, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:54<22:25, 3.66s/it] 29%|██▉ | 153/520 [09:58<22:20, 3.65s/it] {'loss': 1.3276, 'grad_norm': 0.0023161445705277817, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:58<22:20, 3.65s/it] 30%|██▉ | 154/520 [10:01<22:17, 3.65s/it] {'loss': 1.4223, 'grad_norm': 0.0022033560855891005, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [10:01<22:17, 3.65s/it] 30%|██▉ | 155/520 [10:05<22:12, 3.65s/it] {'loss': 1.3282, 'grad_norm': 
0.0022140618430697627, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [10:05<22:12, 3.65s/it] 30%|███ | 156/520 [10:08<22:10, 3.65s/it] {'loss': 1.3556, 'grad_norm': 0.0023319123821954483, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [10:08<22:10, 3.65s/it] 30%|███ | 157/520 [10:12<22:15, 3.68s/it] {'loss': 1.5074, 'grad_norm': 0.0023608883068934714, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [10:12<22:15, 3.68s/it] 30%|███ | 158/520 [10:16<22:14, 3.69s/it] {'loss': 1.3328, 'grad_norm': 0.0025009399862351333, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:16<22:14, 3.69s/it] 31%|███ | 159/520 [10:20<22:07, 3.68s/it] {'loss': 1.3689, 'grad_norm': 0.0022424748295867825, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:20<22:07, 3.68s/it] 31%|███ | 160/520 [10:23<22:04, 3.68s/it] {'loss': 1.3914, 'grad_norm': 0.0022931638055098316, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:23<22:04, 3.68s/it] 31%|███ | 161/520 [10:27<22:00, 3.68s/it] {'loss': 1.3705, 'grad_norm': 0.002238002356197148, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:27<22:00, 3.68s/it] 31%|███ | 162/520 [10:31<21:55, 3.68s/it] {'loss': 1.4382, 'grad_norm': 0.0024524260669198366, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:31<21:55, 3.68s/it] 31%|███▏ | 163/520 [10:34<21:49, 3.67s/it] {'loss': 1.2492, 'grad_norm': 0.002809248407903121, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:34<21:49, 3.67s/it] 32%|███▏ | 164/520 [10:38<21:46, 3.67s/it] {'loss': 1.2266, 'grad_norm': 0.00222642564355267, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:38<21:46, 3.67s/it] 32%|███▏ | 165/520 [10:42<21:42, 3.67s/it] {'loss': 1.361, 'grad_norm': 0.002216879338385997, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:42<21:42, 3.67s/it] 32%|███▏ | 166/520 [10:45<21:34, 3.66s/it] {'loss': 1.3535, 'grad_norm': 0.002432867972410521, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:45<21:34, 3.66s/it] 32%|███▏ | 167/520 [10:49<21:33, 3.66s/it] {'loss': 1.3425, 'grad_norm': 0.0025264319993981675, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:49<21:33, 3.66s/it] 32%|███▏ | 168/520 [10:53<21:31, 3.67s/it] {'loss': 1.2743, 'grad_norm': 0.0022546398980258084, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:53<21:31, 3.67s/it] 32%|███▎ | 169/520 [10:56<21:34, 3.69s/it] {'loss': 1.3587, 'grad_norm': 0.002110884255396401, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:56<21:34, 3.69s/it] 33%|███▎ | 170/520 [11:00<21:31, 3.69s/it] {'loss': 1.3607, 'grad_norm': 0.0025254681375944782, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [11:00<21:31, 3.69s/it] 33%|███▎ | 171/520 [11:04<21:27, 3.69s/it] {'loss': 1.2879, 'grad_norm': 0.0025024833857907962, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [11:04<21:27, 3.69s/it] 33%|███▎ | 172/520 [11:07<21:24, 3.69s/it] {'loss': 1.3558, 'grad_norm': 0.0020713314627560813, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:07<21:24, 3.69s/it] 33%|███▎ | 173/520 [11:11<21:18, 3.68s/it] {'loss': 1.2928, 'grad_norm': 0.00212829754599686, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 
[11:11<21:18, 3.68s/it] 33%|███▎ | 174/520 [11:15<21:20, 3.70s/it] {'loss': 1.3657, 'grad_norm': 0.002683871766908695, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:15<21:20, 3.70s/it] 34%|███▎ | 175/520 [11:18<21:16, 3.70s/it] {'loss': 1.2634, 'grad_norm': 0.002188568178134752, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:18<21:16, 3.70s/it] 34%|███▍ | 176/520 [11:22<21:13, 3.70s/it] {'loss': 1.4466, 'grad_norm': 0.0021305067715183183, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:22<21:13, 3.70s/it] 34%|███▍ | 177/520 [11:26<21:10, 3.70s/it] {'loss': 1.3128, 'grad_norm': 0.002616998301077292, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:26<21:10, 3.70s/it] 34%|███▍ | 178/520 [11:30<21:05, 3.70s/it] {'loss': 1.3387, 'grad_norm': 0.002379823352027356, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:30<21:05, 3.70s/it] 34%|███▍ | 179/520 [11:33<21:02, 3.70s/it] {'loss': 1.4228, 'grad_norm': 0.002166200688760018, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:33<21:02, 3.70s/it] 35%|███▍ | 180/520 [11:37<21:02, 3.71s/it] {'loss': 1.3285, 'grad_norm': 0.002300613043529658, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:37<21:02, 3.71s/it] 35%|███▍ | 181/520 [11:41<20:57, 3.71s/it] {'loss': 1.3023, 'grad_norm': 0.0020678948413080872, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:41<20:57, 3.71s/it] 35%|███▌ | 182/520 [11:44<20:55, 3.71s/it] {'loss': 1.3148, 'grad_norm': 0.002317602691948382, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:44<20:55, 3.71s/it] 35%|███▌ | 183/520 [11:48<20:53, 3.72s/it] {'loss': 1.347, 'grad_norm': 0.0022552505891952476, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:48<20:53, 3.72s/it] 35%|███▌ | 184/520 [11:52<20:49, 3.72s/it] {'loss': 1.252, 'grad_norm': 0.002093890599780114, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:52<20:49, 3.72s/it] 36%|███▌ | 185/520 [11:56<20:41, 3.71s/it] {'loss': 1.4337, 'grad_norm': 0.0020817318976471544, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:56<20:41, 3.71s/it] 36%|███▌ | 186/520 [11:59<20:33, 3.69s/it] {'loss': 1.2856, 'grad_norm': 0.002175943391478626, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:59<20:33, 3.69s/it] 36%|███▌ | 187/520 [12:03<20:28, 3.69s/it] {'loss': 1.293, 'grad_norm': 0.002670071863881669, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:03<20:28, 3.69s/it] 36%|███▌ | 188/520 [12:07<20:22, 3.68s/it] {'loss': 1.3737, 'grad_norm': 0.0023370921198907313, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:07<20:22, 3.68s/it] 36%|███▋ | 189/520 [12:10<20:17, 3.68s/it] {'loss': 1.3817, 'grad_norm': 0.0019762611308436753, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:10<20:17, 3.68s/it] 37%|███▋ | 190/520 [12:14<20:11, 3.67s/it] {'loss': 1.2987, 'grad_norm': 0.002329567247518307, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:14<20:11, 3.67s/it] 37%|███▋ | 191/520 [12:18<20:04, 3.66s/it] {'loss': 1.2511, 'grad_norm': 0.001997936941213124, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:18<20:04, 3.66s/it] 37%|███▋ | 192/520 [12:21<20:00, 3.66s/it] {'loss': 1.3414, 
'grad_norm': 0.00211853252996266, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:21<20:00, 3.66s/it] 37%|███▋ | 193/520 [12:25<19:58, 3.66s/it] {'loss': 1.3791, 'grad_norm': 0.0027667277335461514, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:25<19:58, 3.66s/it] 37%|███▋ | 194/520 [12:29<19:53, 3.66s/it] {'loss': 1.2535, 'grad_norm': 0.0022449647287118217, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:29<19:53, 3.66s/it] 38%|███▊ | 195/520 [12:32<19:48, 3.66s/it] {'loss': 1.3629, 'grad_norm': 0.002181250361039846, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:32<19:48, 3.66s/it] 38%|███▊ | 196/520 [12:36<19:42, 3.65s/it] {'loss': 1.3246, 'grad_norm': 0.002321779412873477, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:36<19:42, 3.65s/it] 38%|███▊ | 197/520 [12:40<19:40, 3.66s/it] {'loss': 1.2866, 'grad_norm': 0.0020686593494554243, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:40<19:40, 3.66s/it] 38%|███▊ | 198/520 [12:43<19:41, 3.67s/it] {'loss': 1.3605, 'grad_norm': 0.0023000022712189134, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:43<19:41, 3.67s/it] 38%|███▊ | 199/520 [12:47<19:51, 3.71s/it] {'loss': 1.273, 'grad_norm': 0.0022076514329417095, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:47<19:51, 3.71s/it] 38%|███▊ | 200/520 [12:51<19:55, 3.74s/it] {'loss': 1.3133, 'grad_norm': 0.0022177136557248924, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:51<19:55, 3.74s/it] 39%|███▊ | 201/520 [12:55<20:00, 3.76s/it] {'loss': 1.3231, 'grad_norm': 0.001995041024939847, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:55<20:00, 3.76s/it] 39%|███▉ | 202/520 [12:58<20:00, 3.78s/it] {'loss': 1.2705, 'grad_norm': 0.0021066997398920006, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:58<20:00, 3.78s/it] 39%|███▉ | 203/520 [13:02<20:01, 3.79s/it] {'loss': 1.3269, 'grad_norm': 0.0022307285202313887, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:02<20:01, 3.79s/it] 39%|███▉ | 204/520 [13:06<19:55, 3.78s/it] {'loss': 1.356, 'grad_norm': 0.0022456472968919263, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:06<19:55, 3.78s/it] 39%|███▉ | 205/520 [13:10<19:54, 3.79s/it] {'loss': 1.332, 'grad_norm': 0.0022137614456023895, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:10<19:54, 3.79s/it] 40%|███▉ | 206/520 [13:14<19:53, 3.80s/it] {'loss': 1.3884, 'grad_norm': 0.002223767629941832, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:14<19:53, 3.80s/it] 40%|███▉ | 207/520 [13:17<19:45, 3.79s/it] {'loss': 1.3113, 'grad_norm': 0.001978251774077043, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:17<19:45, 3.79s/it] 40%|████ | 208/520 [13:21<19:28, 3.74s/it] {'loss': 1.3619, 'grad_norm': 0.0023866598263627106, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:21<19:28, 3.74s/it] 40%|████ | 209/520 [13:25<19:34, 3.78s/it] {'loss': 1.2743, 'grad_norm': 0.0021334039900033543, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:25<19:34, 3.78s/it] 40%|████ | 210/520 [13:29<19:39, 3.80s/it] {'loss': 1.3672, 'grad_norm': 0.0024092121271209843, 'learning_rate': 0.13537080696225814, 'epoch': 
0.4} + 40%|████ | 210/520 [13:29<19:39, 3.80s/it] 41%|████ | 211/520 [13:33<19:42, 3.83s/it] {'loss': 1.3658, 'grad_norm': 0.0020184547904940325, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:33<19:42, 3.83s/it] 41%|████ | 212/520 [13:37<19:41, 3.84s/it] {'loss': 1.335, 'grad_norm': 0.002050654995606541, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:37<19:41, 3.84s/it] 41%|████ | 213/520 [13:40<19:40, 3.85s/it] {'loss': 1.2967, 'grad_norm': 0.0025549158533037302, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:40<19:40, 3.85s/it] 41%|████ | 214/520 [13:44<19:40, 3.86s/it] {'loss': 1.2962, 'grad_norm': 0.0022745334333578197, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:44<19:40, 3.86s/it] 41%|████▏ | 215/520 [13:48<19:37, 3.86s/it] {'loss': 1.2464, 'grad_norm': 0.0020697625414465163, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:48<19:37, 3.86s/it] 42%|████▏ | 216/520 [13:52<19:33, 3.86s/it] {'loss': 1.207, 'grad_norm': 0.0021373619344671815, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:52<19:33, 3.86s/it] 42%|████▏ | 217/520 [13:56<19:28, 3.86s/it] {'loss': 1.3305, 'grad_norm': 0.0022165837727050617, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:56<19:28, 3.86s/it] 42%|████▏ | 218/520 [14:00<19:25, 3.86s/it] {'loss': 1.3219, 'grad_norm': 0.002222624230791647, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:00<19:25, 3.86s/it] 42%|████▏ | 219/520 [14:04<19:21, 3.86s/it] {'loss': 1.3065, 'grad_norm': 0.001902789879612392, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:04<19:21, 3.86s/it] 42%|████▏ | 220/520 [14:07<19:17, 3.86s/it] {'loss': 1.3014, 'grad_norm': 0.002149652288989638, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:07<19:17, 3.86s/it] 42%|████▎ | 221/520 [14:11<19:13, 3.86s/it] {'loss': 1.333, 'grad_norm': 0.002140305736250439, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:11<19:13, 3.86s/it] 43%|████▎ | 222/520 [14:15<19:10, 3.86s/it] {'loss': 1.2431, 'grad_norm': 0.00202419714033892, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:15<19:10, 3.86s/it] 43%|████▎ | 223/520 [14:19<19:07, 3.86s/it] {'loss': 1.2381, 'grad_norm': 0.00196252145507785, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:19<19:07, 3.86s/it] 43%|████▎ | 224/520 [14:23<19:06, 3.87s/it] {'loss': 1.4417, 'grad_norm': 0.002903979326748579, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:23<19:06, 3.87s/it] 43%|████▎ | 225/520 [14:27<19:07, 3.89s/it] {'loss': 1.2577, 'grad_norm': 0.002073484947349893, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:27<19:07, 3.89s/it] 43%|████▎ | 226/520 [14:31<18:53, 3.85s/it] {'loss': 1.3559, 'grad_norm': 0.0020073258622259654, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:31<18:53, 3.85s/it] 44%|████▎ | 227/520 [14:34<18:46, 3.85s/it] {'loss': 1.3469, 'grad_norm': 0.0019668218159059883, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:34<18:46, 3.85s/it] 44%|████▍ | 228/520 [14:38<18:35, 3.82s/it] {'loss': 1.4415, 'grad_norm': 0.0022070776214574606, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:38<18:35, 3.82s/it] 
44%|████▍ | 229/520 [14:42<18:26, 3.80s/it] {'loss': 1.3129, 'grad_norm': 0.001893463332570683, 'learning_rate': 0.12407120754089732, 'epoch': 0.44}
+ 44%|████▍ | 230/520 [14:46<18:20, 3.79s/it] {'loss': 1.1946, 'grad_norm': 0.002101866909860861, 'learning_rate': 0.12346574004677154, 'epoch': 0.44}
+ 44%|████▍ | 231/520 [14:50<18:15, 3.79s/it] {'loss': 1.2585, 'grad_norm': 0.0018949222642758244, 'learning_rate': 0.12285936081156897, 'epoch': 0.44}
+ 45%|████▍ | 232/520 [14:53<18:08, 3.78s/it] {'loss': 1.4598, 'grad_norm': 0.0022260875114653114, 'learning_rate': 0.12225209339563144, 'epoch': 0.45}
+ 45%|████▍ | 233/520 [14:57<18:03, 3.77s/it] {'loss': 1.352, 'grad_norm': 0.002375923651721686, 'learning_rate': 0.12164396139381028, 'epoch': 0.45}
+ 45%|████▌ | 234/520 [15:01<17:59, 3.78s/it] {'loss': 1.2102, 'grad_norm': 0.002146855885041871, 'learning_rate': 0.12103498843454959, 'epoch': 0.45}
+ 45%|████▌ | 235/520 [15:05<17:56, 3.78s/it] {'loss': 1.2618, 'grad_norm': 0.0021267523187806195, 'learning_rate': 0.12042519817896805, 'epoch': 0.45}
+ 45%|████▌ | 236/520 [15:08<17:53, 3.78s/it] {'loss': 1.3676, 'grad_norm': 0.0019483547198830774, 'learning_rate': 0.11981461431993977, 'epoch': 0.45}
+ 46%|████▌ | 237/520 [15:12<17:51, 3.78s/it] {'loss': 1.337, 'grad_norm': 0.0020242986962206127, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 46%|████▌ | 238/520 [15:16<17:47, 3.79s/it] {'loss': 1.2783, 'grad_norm': 0.0021187632861568496, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 46%|████▌ | 239/520 [15:20<17:44, 3.79s/it] {'loss': 1.3724, 'grad_norm': 0.002126051302086944, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 46%|████▌ | 240/520 [15:24<18:02, 3.87s/it] {'loss': 1.1528, 'grad_norm': 0.0021523542414268815, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 46%|████▋ | 241/520 [15:28<18:13, 3.92s/it] {'loss': 1.244, 'grad_norm': 0.0019520126072338948, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 47%|████▋ | 242/520 [15:32<18:18, 3.95s/it] {'loss': 1.2628, 'grad_norm': 0.001895070270779323, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 47%|████▋ | 243/520 [15:36<18:22, 3.98s/it] {'loss': 1.2428, 'grad_norm': 0.0020786830018919137, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 47%|████▋ | 244/520 [15:40<18:23, 4.00s/it] {'loss': 1.3768, 'grad_norm': 0.002077234249590834, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 47%|████▋ | 245/520 [15:44<18:18, 3.99s/it] {'loss': 1.231, 'grad_norm': 0.001991229929672168, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 47%|████▋ | 246/520 [15:48<18:09, 3.98s/it] {'loss': 1.4353, 'grad_norm': 0.0022667298896370622, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 48%|████▊ | 247/520 [15:52<17:56, 3.94s/it] {'loss': 1.4175, 'grad_norm': 0.0020996335472251854, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 48%|████▊ | 248/520 [15:56<17:48, 3.93s/it] {'loss': 1.2407, 'grad_norm': 0.00211834604658526, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 48%|████▊ | 249/520 [16:00<17:41, 3.92s/it] {'loss': 1.3399, 'grad_norm': 0.0020250426443239006, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 48%|████▊ | 250/520 [16:03<17:38, 3.92s/it] {'loss': 1.2702, 'grad_norm': 0.0022040141149937014, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 48%|████▊ | 251/520 [16:07<17:33, 3.92s/it] {'loss': 1.3334, 'grad_norm': 0.0018463984351425136, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 48%|████▊ | 252/520 [16:11<17:27, 3.91s/it] {'loss': 1.3259, 'grad_norm': 0.002062461339980796, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 49%|████▊ | 253/520 [16:15<17:25, 3.92s/it] {'loss': 1.3373, 'grad_norm': 0.0022668438610670546, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 49%|████▉ | 254/520 [16:19<17:20, 3.91s/it] {'loss': 1.2587, 'grad_norm': 0.0019709409996649927, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 49%|████▉ | 255/520 [16:23<17:15, 3.91s/it] {'loss': 1.2654, 'grad_norm': 0.002358555199535905, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 49%|████▉ | 256/520 [16:27<17:07, 3.89s/it] {'loss': 1.3145, 'grad_norm': 0.0021807676551600748, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 49%|████▉ | 257/520 [16:31<17:02, 3.89s/it] {'loss': 1.3082, 'grad_norm': 0.00210935451445596, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 50%|████▉ | 258/520 [16:35<17:02, 3.90s/it] {'loss': 1.3176, 'grad_norm': 0.001833672059702039, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 50%|████▉ | 259/520 [16:39<16:56, 3.90s/it] {'loss': 1.3763, 'grad_norm': 0.0023121183710349278, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 50%|█████ | 260/520 [16:42<16:51, 3.89s/it] {'loss': 1.3912, 'grad_norm': 0.0019114499383331097, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 50%|█████ | 261/520 [16:46<16:48, 3.89s/it] {'loss': 1.3262, 'grad_norm': 0.002083819239292315, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 50%|█████ | 262/520 [16:50<16:44, 3.89s/it] {'loss': 1.227, 'grad_norm': 0.002056218852959444, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 51%|█████ | 263/520 [16:54<16:37, 3.88s/it] {'loss': 1.3349, 'grad_norm': 0.0022317502348403557, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 51%|█████ | 264/520 [16:58<16:35, 3.89s/it] {'loss': 1.345, 'grad_norm': 0.0020041319170026935, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 51%|█████ | 265/520 [17:02<16:29, 3.88s/it] {'loss': 1.2491, 'grad_norm': 0.0022637206867329247, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 51%|█████ | 266/520 [17:06<16:10, 3.82s/it] {'loss': 1.1071, 'grad_norm': 0.0018081707605127357, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 51%|█████▏ | 267/520 [17:09<15:58, 3.79s/it] {'loss': 1.2427, 'grad_norm': 0.0019625210756411436, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 52%|█████▏ | 268/520 [17:13<15:47, 3.76s/it] {'loss': 1.4547, 'grad_norm': 0.002801571767908265, 'learning_rate': 0.1, 'epoch': 0.52}
+ 52%|█████▏ | 269/520 [17:17<15:36, 3.73s/it] {'loss': 1.3544, 'grad_norm': 0.0020869611517181524, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 52%|█████▏ | 270/520 [17:20<15:29, 3.72s/it] {'loss': 1.2564, 'grad_norm': 0.0020072882791194735, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 52%|█████▏ | 271/520 [17:24<15:22, 3.70s/it] {'loss': 1.3399, 'grad_norm': 0.0020855332285055245, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 52%|█████▏ | 272/520 [17:28<15:26, 3.74s/it] {'loss': 1.2761, 'grad_norm': 0.0023643542112158663, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 52%|█████▎ | 273/520 [17:32<15:26, 3.75s/it] {'loss': 1.402, 'grad_norm': 0.0022466985610632706, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
+ 53%|█████▎ | 274/520 [17:35<15:28, 3.77s/it] {'loss': 1.2993, 'grad_norm': 0.0021680756944424525, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 53%|█████▎ | 275/520 [17:39<15:30, 3.80s/it] {'loss': 1.2462, 'grad_norm': 0.0022544634403201596, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 53%|█████▎ | 276/520 [17:43<15:27, 3.80s/it] {'loss': 1.3169, 'grad_norm': 0.0022214268263033676, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 53%|█████▎ | 277/520 [17:47<15:27, 3.81s/it] {'loss': 1.3931, 'grad_norm': 0.002116906768352636, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 53%|█████▎ | 278/520 [17:51<15:26, 3.83s/it] {'loss': 1.1896, 'grad_norm': 0.0019625744052966746, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 54%|█████▎ | 279/520 [17:55<15:22, 3.83s/it] {'loss': 1.2907, 'grad_norm': 0.0023881032655934517, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 54%|█████▍ | 280/520 [17:58<15:19, 3.83s/it] {'loss': 1.2475, 'grad_norm': 0.0022233343249106426, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 54%|█████▍ | 281/520 [18:02<15:15, 3.83s/it] {'loss': 1.3548, 'grad_norm': 0.0021820325776319093, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 54%|█████▍ | 282/520 [18:06<15:10, 3.83s/it] {'loss': 1.2059, 'grad_norm': 0.0019281792826664724, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 54%|█████▍ | 283/520 [18:10<15:12, 3.85s/it] {'loss': 1.3743, 'grad_norm': 0.0022558527184326767, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
+ 55%|█████▍ | 284/520 [18:14<15:08, 3.85s/it] {'loss': 1.2602, 'grad_norm': 0.0022013683466595483, 'learning_rate': 0.09004321534041836, 'epoch': 0.55}
+ 55%|█████▍ | 285/520 [18:18<15:02, 3.84s/it] {'loss': 1.2416, 'grad_norm': 0.0021088914282072694, 'learning_rate': 0.08942317838840624, 'epoch': 0.55}
+ 55%|█████▌ | 286/520 [18:21<14:56, 3.83s/it] {'loss': 1.1097, 'grad_norm': 0.002421968343050719, 'learning_rate': 0.08880355238966922, 'epoch': 0.55}
+ 55%|█████▌ | 287/520 [18:25<14:52, 3.83s/it] {'loss': 1.3478, 'grad_norm': 0.002149608984808022, 'learning_rate': 0.08818436141924073, 'epoch': 0.55}
+ 55%|█████▌ | 288/520 [18:29<14:47, 3.83s/it] {'loss': 1.3903, 'grad_norm': 0.002240927713154325, 'learning_rate': 0.08756562953525152, 'epoch': 0.55}
+ 56%|█████▌ | 289/520 [18:33<14:47, 3.84s/it] {'loss': 1.2491, 'grad_norm': 0.0019480807360922076, 'learning_rate': 0.08694738077799487, 'epoch': 0.56}
+ 56%|█████▌ | 290/520 [18:37<14:41, 3.83s/it] {'loss': 1.1715, 'grad_norm': 0.0019405894146606974, 'learning_rate': 0.08632963916899268, 'epoch': 0.56}
+ 56%|█████▌ | 291/520 [18:41<14:37, 3.83s/it] {'loss': 1.2271, 'grad_norm': 0.0022188579607928796, 'learning_rate': 0.08571242871006202, 'epoch': 0.56}
+ 56%|█████▌ | 292/520 [18:44<14:38, 3.85s/it] {'loss': 1.2839, 'grad_norm': 0.0020646328459035176, 'learning_rate': 0.08509577338238256, 'epoch': 0.56}
+ 56%|█████▋ | 293/520 [18:48<14:32, 3.84s/it] {'loss': 1.2169, 'grad_norm': 0.0022279117848198336, 'learning_rate': 0.08447969714556484, 'epoch': 0.56}
+ 57%|█████▋ | 294/520 [18:52<14:27, 3.84s/it] {'loss': 1.2482, 'grad_norm': 0.002260311796343891, 'learning_rate': 0.08386422393671933, 'epoch': 0.57}
+ 57%|█████▋ | 295/520 [18:56<14:24, 3.84s/it] {'loss': 1.3245, 'grad_norm': 0.0021874740624762577, 'learning_rate': 0.08324937766952638, 'epoch': 0.57}
+ 57%|█████▋ | 296/520 [19:00<14:19, 3.84s/it] {'loss': 1.192, 'grad_norm': 0.002263188909590991, 'learning_rate': 0.08263518223330697, 'epoch': 0.57}
+ 57%|█████▋ | 297/520 [19:04<14:14, 3.83s/it] {'loss': 1.3149, 'grad_norm': 0.0022432307701796723, 'learning_rate': 0.08202166149209474, 'epoch': 0.57}
+ 57%|█████▋ | 298/520 [19:07<14:07, 3.82s/it] {'loss': 1.2801, 'grad_norm': 0.0018194058063835852, 'learning_rate': 0.08140883928370855, 'epoch': 0.57}
+ 57%|█████▊ | 299/520 [19:11<14:05, 3.83s/it] {'loss': 1.3419, 'grad_norm': 0.0018883898122110027, 'learning_rate': 0.0807967394188264, 'epoch': 0.57}
+ 58%|█████▊ | 300/520 [19:15<14:01, 3.83s/it] {'loss': 1.3438, 'grad_norm': 0.002069795429208899, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 58%|█████▊ | 301/520 [19:19<13:57, 3.83s/it] {'loss': 1.3114, 'grad_norm': 0.0020835236970579716, 'learning_rate': 0.07957480182103199, 'epoch': 0.58}
+ 58%|█████▊ | 302/520 [19:23<13:52, 3.82s/it] {'loss': 1.3663, 'grad_norm': 0.002083972141288684, 'learning_rate': 0.07896501156545044, 'epoch': 0.58}
+ 58%|█████▊ | 303/520 [19:27<13:49, 3.82s/it] {'loss': 1.2495, 'grad_norm': 0.0024644588175621643, 'learning_rate': 0.07835603860618973, 'epoch': 0.58}
+ 58%|█████▊ | 304/520 [19:30<13:49, 3.84s/it] {'loss': 1.2534, 'grad_norm': 0.0022955536456417, 'learning_rate': 0.07774790660436857, 'epoch': 0.58}
+ 59%|█████▊ | 305/520 [19:34<13:47, 3.85s/it] {'loss': 1.3546, 'grad_norm': 0.0023504298454661726, 'learning_rate': 0.07714063918843106, 'epoch': 0.59}
+ 59%|█████▉ | 306/520 [19:38<13:43, 3.85s/it] {'loss': 1.2916, 'grad_norm': 0.0021942822131679365, 'learning_rate': 0.0765342599532285, 'epoch': 0.59}
+ 59%|█████▉ | 307/520 [19:42<14:00, 3.95s/it] {'loss': 1.2261, 'grad_norm': 0.0018906499788314559, 'learning_rate': 0.07592879245910272, 'epoch': 0.59}
+ 59%|█████▉ | 308/520 [19:46<13:50, 3.92s/it] {'loss': 1.3498, 'grad_norm': 0.00202390831972979, 'learning_rate': 0.07532426023097064, 'epoch': 0.59}
+ 59%|█████▉ | 309/520 [19:50<13:39, 3.88s/it] {'loss': 1.2281, 'grad_norm': 0.00190911458791959, 'learning_rate': 0.07472068675741024, 'epoch': 0.59}
+ 60%|█████▉ | 310/520 [19:54<13:32, 3.87s/it] {'loss': 1.2054, 'grad_norm': 0.001973453557116991, 'learning_rate': 0.07411809548974792, 'epoch': 0.6}
+ 60%|█████▉ | 311/520 [19:58<13:25, 3.85s/it] {'loss': 1.1812, 'grad_norm': 0.0020602401485406542, 'learning_rate': 0.07351650984114727, 'epoch': 0.6}
+ 60%|██████ | 312/520 [20:01<13:21, 3.85s/it] {'loss': 1.1714, 'grad_norm': 0.002241406515968785, 'learning_rate': 0.0729159531856995, 'epoch': 0.6}
+ 60%|██████ | 313/520 [20:05<13:14, 3.84s/it] {'loss': 1.1617, 'grad_norm': 0.0018627037590485838, 'learning_rate': 0.07231644885751508, 'epoch': 0.6}
+ 60%|██████ | 314/520 [20:09<13:24, 3.90s/it] {'loss': 1.1997, 'grad_norm': 0.0018807130968140004, 'learning_rate': 0.07171802014981725, 'epoch': 0.6}
+ 61%|██████ | 315/520 [20:13<13:08, 3.85s/it] {'loss': 1.3268, 'grad_norm': 0.003489882849911564, 'learning_rate': 0.07112069031403703, 'epoch': 0.61}
+ 61%|██████ | 316/520 [20:17<13:16, 3.91s/it] {'loss': 1.1741, 'grad_norm': 0.002499094548921943, 'learning_rate': 0.07052448255890957, 'epoch': 0.61}
+ 61%|██████ | 317/520 [20:21<13:01, 3.85s/it] {'loss': 1.1933, 'grad_norm': 0.0018410290646500573, 'learning_rate': 0.0699294200495727, 'epoch': 0.61}
+ 61%|██████ | 318/520 [20:25<12:49, 3.81s/it] {'loss': 1.3195, 'grad_norm': 0.00222078332786264, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 61%|██████▏ | 319/520 [20:29<13:02, 3.89s/it] {'loss': 1.1799, 'grad_norm': 0.001998520073235977, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 62%|██████▏ | 320/520 [20:32<12:45, 3.83s/it] {'loss': 1.1242, 'grad_norm': 0.0020911647790375684, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 62%|██████▏ | 321/520 [20:36<12:35, 3.79s/it] {'loss': 1.3204, 'grad_norm': 0.002131078380450351, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 62%|██████▏ | 322/520 [20:40<12:25, 3.77s/it] {'loss': 1.1883, 'grad_norm': 0.002055280057026195, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 62%|██████▏ | 323/520 [20:43<12:20, 3.76s/it] {'loss': 1.2656, 'grad_norm': 0.0021828472468183578, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 62%|██████▏ | 324/520 [20:47<12:12, 3.74s/it] {'loss': 1.2566, 'grad_norm': 0.0021646795321687037, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 62%|██████▎ | 325/520 [20:51<12:08, 3.73s/it] {'loss': 1.2693, 'grad_norm': 0.0021422987857345394, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 63%|██████▎ | 326/520 [20:55<12:03, 3.73s/it] {'loss': 1.2527, 'grad_norm': 0.0020996086280491086, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 63%|██████▎ | 327/520 [20:58<11:57, 3.72s/it] {'loss': 1.3336, 'grad_norm': 0.0024357736888388254, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 63%|██████▎ | 328/520 [21:02<11:51, 3.71s/it] {'loss': 1.3153, 'grad_norm': 0.0021105715646344944, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 63%|██████▎ | 329/520 [21:06<11:47, 3.70s/it] {'loss': 1.1741, 'grad_norm': 0.0017366313223448786, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 63%|██████▎ | 330/520 [21:09<11:44, 3.71s/it] {'loss': 1.2539, 'grad_norm': 0.0018811448967815912, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 64%|██████▎ | 331/520 [21:13<11:39, 3.70s/it] {'loss': 1.2091, 'grad_norm': 0.0019325393699883163, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 64%|██████▍ | 332/520 [21:17<11:37, 3.71s/it] {'loss': 1.3417, 'grad_norm': 0.00203652273886767, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 64%|██████▍ | 333/520 [21:21<11:33, 3.71s/it] {'loss': 1.3682, 'grad_norm': 0.002132405941635854, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 64%|██████▍ | 334/520 [21:24<11:31, 3.72s/it] {'loss': 1.2548, 'grad_norm': 0.002269753019888245, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 64%|██████▍ | 335/520 [21:28<11:25, 3.71s/it] {'loss': 1.2513, 'grad_norm': 0.0018393281578664123, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 65%|██████▍ | 336/520 [21:32<11:22, 3.71s/it] {'loss': 1.1445, 'grad_norm': 0.002211192681573828, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 65%|██████▍ | 337/520 [21:35<11:18, 3.71s/it] {'loss': 1.134, 'grad_norm': 0.002012751382525582, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 65%|██████▌ | 338/520 [21:39<11:15, 3.71s/it] {'loss': 1.2642, 'grad_norm': 0.0020375398326444587, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 65%|██████▌ | 339/520 [21:43<11:11, 3.71s/it] {'loss': 1.2077, 'grad_norm': 0.0019593543879817925, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 65%|██████▌ | 340/520 [21:47<11:13, 3.74s/it] {'loss': 1.1953, 'grad_norm': 0.0019899634037773566, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 66%|██████▌ | 341/520 [21:50<11:10, 3.75s/it] {'loss': 1.2222, 'grad_norm': 0.002150047368515498, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 66%|██████▌ | 342/520 [21:54<11:09, 3.76s/it] {'loss': 1.3012, 'grad_norm': 0.0024162213215534792, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 66%|██████▌ | 343/520 [21:58<11:05, 3.76s/it] {'loss': 1.269, 'grad_norm': 0.0020954187057911065, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 66%|██████▌ | 344/520 [22:02<10:56, 3.73s/it] {'loss': 1.1651, 'grad_norm': 0.0020954730491977397, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
+ 66%|██████▋ | 345/520 [22:05<10:49, 3.71s/it] {'loss': 1.2862, 'grad_norm': 0.0023169909348817776, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 67%|██████▋ | 346/520 [22:09<10:45, 3.71s/it] {'loss': 1.2644, 'grad_norm': 0.0019153142121830437, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 67%|██████▋ | 347/520 [22:13<10:38, 3.69s/it] {'loss': 1.1829, 'grad_norm': 0.0018662160830692001, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 67%|██████▋ | 348/520 [22:16<10:34, 3.69s/it] {'loss': 1.1468, 'grad_norm': 0.002401100728520033, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 67%|██████▋ | 349/520 [22:20<10:27, 3.67s/it] {'loss': 1.1897, 'grad_norm': 0.00219208146652065, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 67%|██████▋ | 350/520 [22:24<10:25, 3.68s/it] {'loss': 1.2291, 'grad_norm': 0.0021218460675884263, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 68%|██████▊ | 351/520 [22:27<10:20, 3.67s/it] {'loss': 1.1362, 'grad_norm': 0.0019006936879256283, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 68%|██████▊ | 352/520 [22:31<10:15, 3.67s/it] {'loss': 1.2602, 'grad_norm': 0.0020095327830929053, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 68%|██████▊ | 353/520 [22:35<10:16, 3.69s/it] {'loss': 1.2163, 'grad_norm': 0.0017141793630610848, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 68%|██████▊ | 354/520 [22:38<10:09, 3.67s/it] {'loss': 1.3499, 'grad_norm': 0.002154744130209063, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 68%|██████▊ | 355/520 [22:42<10:05, 3.67s/it] {'loss': 1.1975, 'grad_norm': 0.002072304398743255, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 68%|██████▊ | 356/520 [22:46<10:05, 3.69s/it] {'loss': 1.1974, 'grad_norm': 0.002041009509208091, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 69%|██████▊ | 357/520 [22:49<10:02, 3.70s/it] {'loss': 1.2234, 'grad_norm': 0.0019162361445568686, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 69%|██████▉ | 358/520 [22:53<09:58, 3.69s/it] {'loss': 1.1533, 'grad_norm': 0.0019914938552215763, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 69%|██████▉ | 359/520 [22:57<09:54, 3.69s/it] {'loss': 1.2798, 'grad_norm': 0.0021700294796137705, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 69%|██████▉ | 360/520 [23:00<09:48, 3.68s/it] {'loss': 1.2924, 'grad_norm': 0.002151458950192528, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 69%|██████▉ | 361/520 [23:04<09:44, 3.68s/it] {'loss': 1.2864, 'grad_norm': 0.0018423100469191773, 'learning_rate': 0.04522281607821288, 'epoch': 0.69}
+ 70%|██████▉ | 362/520 [23:08<09:40, 3.67s/it] {'loss': 1.2147, 'grad_norm': 0.0021163516479530613, 'learning_rate': 0.04470238686320606, 'epoch': 0.7}
+ 70%|██████▉ | 363/520 [23:11<09:35, 3.67s/it] {'loss': 1.2322, 'grad_norm': 0.0019462431191656128, 'learning_rate': 0.044184106189231624, 'epoch': 0.7}
+ 70%|███████ | 364/520 [23:15<09:45, 3.75s/it] {'loss': 1.3051, 'grad_norm': 0.001967296938618401, 'learning_rate': 0.043667994193637795, 'epoch': 0.7}
+ 70%|███████ | 365/520 [23:19<09:39, 3.74s/it] {'loss': 1.2988, 'grad_norm': 0.0020680330453961716, 'learning_rate': 0.043154070929510784, 'epoch': 0.7}
+ 70%|███████ | 366/520 [23:23<09:33, 3.72s/it] {'loss': 1.2504, 'grad_norm': 0.0020339000381459317, 'learning_rate': 0.04264235636489542, 'epoch': 0.7}
+ 71%|███████ | 367/520 [23:26<09:30, 3.73s/it] {'loss': 1.2483, 'grad_norm': 0.0019806415128703035, 'learning_rate': 0.04213287038201943, 'epoch': 0.71}
+ 71%|███████ | 368/520 [23:30<09:24, 3.71s/it] {'loss': 1.1003, 'grad_norm': 0.0020073865417832605, 'learning_rate': 0.04162563277652104, 'epoch': 0.71}
+ 71%|███████ | 369/520 [23:34<09:18, 3.70s/it] {'loss': 1.2733, 'grad_norm': 0.0021893284343307124, 'learning_rate': 0.04112066325667954, 'epoch': 0.71}
+ 71%|███████ | 370/520 [23:37<09:13, 3.69s/it] {'loss': 1.1595, 'grad_norm': 0.0018412273860929755, 'learning_rate': 0.04061798144264986, 'epoch': 0.71}
+ 71%|███████▏ | 371/520 [23:41<09:09, 3.69s/it] {'loss': 1.1625, 'grad_norm': 0.0020373476382252237, 'learning_rate': 0.04011760686569998, 'epoch': 0.71}
+ 72%|███████▏ | 372/520 [23:45<09:06, 3.69s/it] {'loss': 1.3483, 'grad_norm': 0.0018211672661455369, 'learning_rate': 0.03961955896745224, 'epoch': 0.72}
+ 72%|███████▏ | 373/520 [23:49<09:01, 3.68s/it] {'loss': 1.2285, 'grad_norm': 0.0021315031424551808, 'learning_rate': 0.03912385709912794, 'epoch': 0.72}
+ 72%|███████▏ | 374/520 [23:52<08:57, 3.68s/it] {'loss': 1.2458, 'grad_norm': 0.00199882899320793, 'learning_rate': 0.038630520520795276, 'epoch': 0.72}
+ 72%|███████▏ | 375/520 [23:56<08:53, 3.68s/it] {'loss': 1.1565, 'grad_norm': 0.0020336797419871913, 'learning_rate': 0.03813956840062119, 'epoch': 0.72}
+ 72%|███████▏ | 376/520 [24:00<08:54, 3.71s/it] {'loss': 1.2809, 'grad_norm': 0.0019998373064860353, 'learning_rate': 0.037651019814126656, 'epoch': 0.72}
+ 72%|███████▎ | 377/520 [24:03<08:49, 3.70s/it] {'loss': 1.2142, 'grad_norm': 0.0027910002064110064, 'learning_rate': 0.037164893743445275, 'epoch': 0.72}
+ 73%|███████▎ | 378/520 [24:07<08:45, 3.70s/it] {'loss': 1.2688, 'grad_norm': 0.0019262564895530662, 'learning_rate': 0.03668120907658603, 'epoch': 0.73}
+ 73%|███████▎ | 379/520 [24:11<08:41, 3.70s/it] {'loss': 1.2444, 'grad_norm': 0.0018696415350345282, 'learning_rate': 0.036199984606699154, 'epoch': 0.73}
+ 73%|███████▎ | 380/520 [24:14<08:38, 3.70s/it] {'loss': 1.3249, 'grad_norm': 0.0021543692437955187, 'learning_rate': 0.035721239031346066, 'epoch': 0.73}
+ 73%|███████▎ | 381/520 [24:18<08:33, 3.70s/it] {'loss': 1.2487, 'grad_norm': 0.001942892966641647, 'learning_rate': 0.03524499095177297, 'epoch': 0.73}
+ 73%|███████▎ | 382/520 [24:22<08:30, 3.70s/it] {'loss': 1.2793, 'grad_norm': 0.0022167598789768644, 'learning_rate': 0.03477125887218792, 'epoch': 0.73}
+ 74%|███████▎ | 383/520 [24:26<08:27, 3.70s/it] {'loss': 1.0852, 'grad_norm': 0.0020981746947131025, 'learning_rate': 0.03430006119904196, 'epoch': 0.74}
+ 74%|███████▍ | 384/520 [24:29<08:22, 3.70s/it] {'loss': 1.3536, 'grad_norm': 0.0020544987314823813, 'learning_rate': 0.033831416240314084, 'epoch': 0.74}
+ 74%|███████▍ | 385/520 [24:33<08:18, 3.70s/it] {'loss': 1.2242, 'grad_norm': 0.0018098044721291935, 'learning_rate': 0.03336534220479961, 'epoch': 0.74}
+ 74%|███████▍ | 386/520 [24:37<08:15, 3.70s/it] {'loss': 1.1754, 'grad_norm': 0.0016877809795111905, 'learning_rate': 0.032901857201403005, 'epoch': 0.74}
+ 74%|███████▍ | 387/520 [24:40<08:11, 3.69s/it] {'loss': 1.3508, 'grad_norm': 0.0019245794282714559, 'learning_rate': 0.032440979238433976, 'epoch': 0.74}
+ 75%|███████▍ | 388/520 [24:44<08:07, 3.69s/it] {'loss': 1.1241, 'grad_norm': 0.0018160750063209135, 'learning_rate': 0.03198272622290804, 'epoch': 0.75}
+ 75%|███████▍ | 389/520 [24:48<08:02, 3.68s/it] {'loss': 1.1756, 'grad_norm': 0.002221339268831648, 'learning_rate': 0.03152711595985065, 'epoch': 0.75}
+ 75%|███████▌ | 390/520 [24:51<07:59, 3.69s/it] {'loss': 1.238, 'grad_norm': 0.0019757428164173677, 'learning_rate': 0.031074166151605298, 'epoch': 0.75}
+ 75%|███████▌ | 391/520 [24:55<07:55, 3.69s/it] {'loss': 1.3205, 'grad_norm': 0.0020458038052839646, 'learning_rate': 0.030623894397145836, 'epoch': 0.75}
+ 75%|███████▌ | 392/520 [24:59<07:54, 3.71s/it] {'loss': 1.1324, 'grad_norm': 0.0018957780703813655, 'learning_rate': 0.03017631819139273, 'epoch': 0.75}
+ 76%|███████▌ | 393/520 [25:03<07:50, 3.70s/it] {'loss': 1.1646, 'grad_norm': 0.001721393776515405, 'learning_rate': 0.029731454924533086, 'epoch': 0.76}
+ 76%|███████▌ | 394/520 [25:06<07:45, 3.69s/it] {'loss': 1.1962, 'grad_norm': 0.0021016175231440217, 'learning_rate': 0.029289321881345254, 'epoch': 0.76}
+ 76%|███████▌ | 395/520 [25:10<07:43, 3.70s/it] {'loss': 1.1568, 'grad_norm': 0.0021544102209500025, 'learning_rate': 0.028849936240527008, 'epoch': 0.76}
+ 76%|███████▌ | 396/520 [25:14<07:39, 3.70s/it] {'loss': 1.2443, 'grad_norm': 0.002048808512011148, 'learning_rate': 0.028413315074028157, 'epoch': 0.76}
+ 76%|███████▋ | 397/520 [25:17<07:35, 3.70s/it] {'loss': 1.2239, 'grad_norm': 0.0018462685177190455, 'learning_rate': 0.027979475346387363, 'epoch': 0.76}
+ 77%|███████▋ | 398/520 [25:21<07:30, 3.69s/it] {'loss': 1.2257, 'grad_norm': 0.0020398695908024934, 'learning_rate': 0.027548433914072735, 'epoch': 0.77}
+ 77%|███████▋ | 399/520 [25:25<07:26, 3.69s/it] {'loss': 1.2107, 'grad_norm': 0.0019104190532383217, 'learning_rate': 0.027120207524827168, 'epoch': 0.77}
+ 77%|███████▋ | 400/520 [25:28<07:21, 3.68s/it] {'loss': 1.2629, 'grad_norm': 0.0019708543310875586, 'learning_rate': 0.02669481281701739, 'epoch': 0.77}
+ 77%|███████▋ | 401/520 [25:32<07:16, 3.67s/it] {'loss': 1.0557, 'grad_norm': 0.0021675272360403767, 'learning_rate': 0.026272266318987603, 'epoch': 0.77}
+ 77%|███████▋ | 402/520 [25:36<07:11, 3.66s/it] {'loss': 1.1681, 'grad_norm': 0.002088461033975728, 'learning_rate': 0.02585258444841733, 'epoch': 0.77}
+ 78%|███████▊ | 403/520 [25:39<07:06, 3.65s/it] {'loss': 1.1982, 'grad_norm': 0.0022104366430497047, 'learning_rate': 0.025435783511683442, 'epoch': 0.78}
+ 78%|███████▊ | 404/520 [25:43<07:02, 3.64s/it] {'loss': 1.1142, 'grad_norm': 0.002368167762182049, 'learning_rate': 0.02502187970322657, 'epoch': 0.78}
+ 78%|███████▊ | 405/520 [25:46<06:59, 3.65s/it] {'loss': 1.2129, 'grad_norm': 0.0018838394909607512, 'learning_rate': 0.02461088910492202, 'epoch': 0.78}
+ 78%|███████▊ | 406/520 [25:50<06:55, 3.65s/it] {'loss': 1.1455, 'grad_norm': 0.0022785948649968854, 'learning_rate': 0.02420282768545469, 'epoch': 0.78}
+ 78%|███████▊ | 407/520 [25:54<06:51, 3.64s/it] {'loss': 1.2926, 'grad_norm': 0.0019950763556493487, 'learning_rate': 0.02379771129969892, 'epoch': 0.78}
+ 78%|███████▊ | 408/520 [25:57<06:48, 3.65s/it] {'loss': 1.1847, 'grad_norm': 0.002090726748213694, 'learning_rate': 0.023395555688102213, 'epoch': 0.78}
+ 79%|███████▊ | 409/520 [26:01<06:44, 3.64s/it] {'loss': 1.3136, 'grad_norm': 0.002138611450607235, 'learning_rate': 0.02299637647607372, 'epoch': 0.79}
+ 79%|███████▉ | 410/520 [26:05<06:41, 3.65s/it] {'loss': 1.037, 'grad_norm': 0.001915883385294725, 'learning_rate': 0.022600189173377264, 'epoch': 0.79}
+ 79%|███████▉ | 411/520 [26:08<06:36, 3.64s/it] {'loss': 1.2891, 'grad_norm': 0.0022978121841739424, 'learning_rate': 0.022207009173528525, 'epoch': 0.79}
+ 79%|███████▉ | 412/520 [26:12<06:33, 3.64s/it] {'loss': 1.2071, 'grad_norm': 0.002013829970523242, 'learning_rate': 0.02181685175319702, 'epoch': 0.79}
+ 79%|███████▉ | 413/520 [26:16<06:30, 3.65s/it] {'loss': 1.2411, 'grad_norm': 0.0019268302307801784, 'learning_rate': 0.021429732071612653, 'epoch': 0.79}
+ 80%|███████▉ | 414/520 [26:19<06:26, 3.65s/it] {'loss': 1.0374, 'grad_norm': 0.0017233427160661956, 'learning_rate': 0.02104566516997647, 'epoch': 0.8}
+ 80%|███████▉ | 415/520 [26:23<06:24, 3.66s/it] {'loss': 1.1745, 'grad_norm': 0.0018581754340990498, 'learning_rate': 0.020664665970876496, 'epoch': 0.8}
+ 80%|████████ | 416/520 [26:27<06:20, 3.66s/it] {'loss': 1.099, 'grad_norm': 0.002244405013430018, 'learning_rate': 0.020286749277707784, 'epoch': 0.8}
+ 80%|████████ | 417/520 [26:30<06:17, 3.67s/it] {'loss': 1.2612, 'grad_norm': 0.002191425407630245, 'learning_rate': 0.019911929774097215, 'epoch': 0.8}
+ 80%|████████ | 418/520 [26:34<06:20, 3.73s/it] {'loss': 1.2434, 'grad_norm': 0.0018905831294720443, 'learning_rate': 0.019540222023333165, 'epoch': 0.8}
+ 81%|████████ | 419/520 [26:38<06:20, 3.77s/it] {'loss': 1.2307, 'grad_norm': 0.002110920606007406, 'learning_rate': 0.01917164046779948, 'epoch': 0.81}
+ 81%|████████ | 420/520 [26:42<06:21, 3.81s/it] {'loss': 1.122, 'grad_norm': 0.002078714053768333, 'learning_rate': 0.018806199428414352, 'epoch': 0.81}
+ 81%|████████ | 421/520 [26:46<06:16, 3.80s/it] {'loss': 1.0504, 'grad_norm': 0.0021527694089232005, 'learning_rate': 0.018443913104073985, 'epoch': 0.81}
+ 81%|████████ | 422/520 [26:49<06:07, 3.75s/it] {'loss': 1.1784, 'grad_norm': 0.0020210697006546206, 'learning_rate': 0.01808479557110081, 'epoch': 0.81}
+ 81%|████████▏ | 423/520 [26:53<06:00, 3.72s/it] {'loss': 1.1586, 'grad_norm': 0.0022462746711795255, 'learning_rate': 0.017728860782696667, 'epoch': 0.81}
+ 82%|████████▏ | 424/520 [26:57<05:55, 3.70s/it] {'loss': 1.3265, 'grad_norm': 0.002074226649398738, 'learning_rate': 0.017376122568400532, 'epoch': 0.82}
+ 82%|████████▏ | 425/520 [27:00<05:50, 3.69s/it] {'loss': 1.1673, 'grad_norm': 0.0018606365884481085, 'learning_rate': 0.017026594633551252, 'epoch': 0.82}
+ 82%|████████▏ | 426/520 [27:04<05:45, 3.67s/it] {'loss': 1.1984, 'grad_norm': 0.002789237085750094, 'learning_rate': 0.01668029055875512, 'epoch': 0.82}
+ 82%|████████▏ | 427/520 [27:08<05:41, 3.67s/it] {'loss': 1.1055, 'grad_norm': 0.0019321576419725275, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 82%|████████▏ | 428/520 [27:11<05:36, 3.66s/it] {'loss': 1.0833, 'grad_norm': 0.0020245021688372906, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 82%|████████▎ | 429/520 [27:15<05:33, 3.67s/it] {'loss': 1.1824, 'grad_norm': 0.0019149645946737797, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 83%|████████▎ | 430/520 [27:19<05:28, 3.65s/it] {'loss': 1.1789, 'grad_norm': 0.0017881492537081373, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 83%|████████▎ | 431/520 [27:22<05:25, 3.66s/it] {'loss': 1.208, 'grad_norm': 0.0020201736428239352, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 83%|████████▎ | 432/520 [27:26<05:22, 3.66s/it] {'loss': 1.0944, 'grad_norm': 0.002039492855814234, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 83%|████████▎ | 433/520 [27:30<05:17, 3.65s/it] {'loss': 1.2249, 'grad_norm': 0.001907604225434715, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 83%|████████▎ | 434/520 [27:33<05:16, 3.68s/it] {'loss': 0.968, 'grad_norm': 0.0018998665303272889, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 84%|████████▎ | 435/520 [27:37<05:12, 3.67s/it] {'loss': 1.2619, 'grad_norm': 0.0022724685083835468, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 84%|████████▍ | 436/520 [27:41<05:08, 3.67s/it] {'loss': 1.0509, 'grad_norm': 0.0019171280465631383, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 84%|████████▍ | 437/520 [27:44<05:03, 3.66s/it] {'loss': 1.2886, 'grad_norm': 0.0019671339717965944, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 84%|████████▍ | 438/520 [27:48<05:00, 3.66s/it] {'loss': 1.0958, 'grad_norm': 0.001939349665570018, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 84%|████████▍ | 439/520 [27:52<04:56, 3.66s/it] {'loss': 1.1808, 'grad_norm': 0.0016842117733972073, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 85%|████████▍ | 440/520 [27:55<04:53, 3.67s/it] {'loss': 1.138, 'grad_norm': 0.001918360109139272, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 85%|████████▍ | 441/520 [27:59<04:49, 3.67s/it] {'loss': 1.2117, 'grad_norm': 0.0021014948916445214, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 85%|████████▌ | 442/520 [28:03<04:45, 3.66s/it] {'loss': 1.1986, 'grad_norm': 0.002205828900598015, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 85%|████████▌ | 443/520 [28:06<04:41, 3.66s/it] {'loss': 1.2148, 'grad_norm': 0.001968155966144257, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 85%|████████▌ | 444/520 [28:10<04:38, 3.67s/it] {'loss': 1.1821, 'grad_norm': 0.0017863544894624337, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 86%|████████▌ | 445/520 [28:14<04:34, 3.66s/it] {'loss': 1.1004, 'grad_norm': 0.0018994421879536803, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 86%|████████▌ | 446/520 [28:17<04:34, 3.71s/it] {'loss': 1.2819, 'grad_norm': 0.0018654118335969395, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 86%|████████▌ | 447/520 [28:21<04:29, 3.70s/it] {'loss': 1.1938, 'grad_norm': 0.0019602059399889227, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 86%|████████▌ | 448/520 [28:25<04:29, 3.74s/it] {'loss': 1.1736, 'grad_norm': 0.0020027502159613193, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 86%|████████▋ | 449/520 [28:29<04:29, 3.79s/it] {'loss': 1.2307, 'grad_norm': 0.001951387510978697, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 87%|████████▋ | 450/520 [28:33<04:25, 3.79s/it] {'loss': 1.2091, 'grad_norm': 0.001988970135205246, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 87%|████████▋ | 451/520 [28:36<04:18, 3.75s/it] {'loss': 1.2018, 'grad_norm': 0.0019607044859190257, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 87%|████████▋ | 452/520 [28:40<04:12, 3.71s/it] {'loss': 1.2724, 'grad_norm': 0.0018399034193462742, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 87%|████████▋ | 453/520 [28:44<04:08, 3.70s/it] {'loss': 1.2667, 'grad_norm': 0.0019839155093720746, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 87%|████████▋ | 454/520 [28:47<04:04, 3.71s/it] {'loss': 1.1164, 'grad_norm': 0.0021951440045212363, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 88%|████████▊ | 455/520 [28:51<03:59, 3.69s/it] {'loss': 1.2558, 'grad_norm': 0.0019129085433490162, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 88%|████████▊ | 456/520 [28:55<03:56, 3.69s/it] {'loss': 1.1671, 'grad_norm': 0.0019869758469456297, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 88%|████████▊ | 457/520 [28:58<03:52, 3.68s/it] {'loss': 1.1909, 'grad_norm': 0.0017934922557200765, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 88%|████████▊ | 458/520 [29:02<03:47, 3.68s/it] {'loss': 1.3107, 'grad_norm': 0.0020812615593649147, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 88%|████████▊ | 459/520 [29:06<03:43, 3.67s/it] {'loss': 1.2447, 'grad_norm': 0.0019932121801645437, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 88%|████████▊ | 460/520 [29:09<03:39, 3.65s/it] {'loss': 1.1205, 'grad_norm': 0.0019463531535511118, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 89%|████████▊ | 461/520 [29:13<03:35, 3.65s/it] {'loss': 1.2739, 'grad_norm': 0.001629394831700896, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 89%|████████▉ | 462/520 [29:17<03:34, 3.69s/it] {'loss': 1.3209, 'grad_norm': 0.0019208286973753, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 89%|████████▉ | 463/520 [29:21<03:34, 3.76s/it] {'loss': 1.0782, 'grad_norm': 0.0022395412521657547, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 89%|████████▉ | 464/520 [29:24<03:33, 3.80s/it] {'loss': 1.2269, 'grad_norm': 0.002071982222235926, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 89%|████████▉ | 465/520 [29:28<03:31, 3.84s/it] {'loss': 1.3374, 'grad_norm': 0.0022137552493194587, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 90%|████████▉ | 466/520 [29:32<03:28, 3.86s/it] {'loss': 1.2105, 'grad_norm': 0.0017530580486119043, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 90%|████████▉ | 467/520 [29:36<03:26, 3.89s/it] {'loss': 1.2078, 'grad_norm': 0.0018225560246591368, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 90%|█████████ | 468/520 [29:40<03:22, 3.90s/it] {'loss': 1.1892, 'grad_norm': 0.002222136738984622, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 90%|█████████ | 469/520 [29:44<03:19, 3.91s/it] {'loss': 1.2434, 'grad_norm': 0.00213078108533889, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 90%|█████████ | 470/520 [29:48<03:13, 3.86s/it] {'loss': 1.1271, 'grad_norm': 0.001761208292777033, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 91%|█████████ | 471/520 [29:52<03:06, 3.81s/it] {'loss': 1.1424, 'grad_norm': 0.001994023281889523, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 91%|█████████ | 472/520 [29:55<03:02, 3.80s/it] {'loss': 1.1134, 'grad_norm': 0.0019353900040908626, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 91%|█████████ | 473/520 [29:59<02:57, 3.77s/it] {'loss': 1.1755, 'grad_norm': 0.001998371860058009, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 91%|█████████ | 474/520 [30:03<02:53, 3.77s/it] {'loss': 1.2421, 'grad_norm': 0.0018242590671381728, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 91%|█████████▏| 475/520 [30:07<02:48, 3.76s/it] {'loss': 1.1628, 'grad_norm': 0.001850292277106517, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 92%|█████████▏| 476/520 [30:10<02:44, 3.74s/it] {'loss': 1.1695, 'grad_norm': 0.001988987279335518, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 92%|█████████▏| 477/520 [30:14<02:40, 3.72s/it] {'loss': 1.1523, 'grad_norm': 0.0021520074110448497, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 92%|█████████▏| 478/520 [30:18<02:36, 3.73s/it] {'loss': 1.1154, 'grad_norm': 0.0019655263588617784, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 92%|█████████▏| 479/520 [30:21<02:32, 3.72s/it] {'loss': 1.2127, 'grad_norm': 0.001984597315067269, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 92%|█████████▏| 480/520 [30:25<02:28, 3.71s/it] {'loss': 1.2363, 'grad_norm': 0.0018360419697590021, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 92%|█████████▎| 481/520 [30:29<02:24, 3.71s/it] {'loss': 1.2349, 'grad_norm': 0.0017876475982593549, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 93%|█████████▎| 482/520 [30:32<02:21, 3.71s/it] {'loss': 1.2434, 'grad_norm': 0.001996093427501204, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 93%|█████████▎| 483/520 [30:36<02:17, 3.72s/it] {'loss': 1.1822, 'grad_norm': 0.0021655731729764032, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 93%|█████████▎| 484/520 [30:40<02:13, 3.72s/it] {'loss': 1.1906, 'grad_norm': 0.0019972620849766473, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 93%|█████████▎| 485/520 [30:44<02:10, 3.72s/it] {'loss': 1.132, 'grad_norm': 0.0018859505797611648, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 93%|█████████▎| 486/520 [30:47<02:06, 3.71s/it] {'loss': 1.2594, 'grad_norm': 0.002083893485993248, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 94%|█████████▎| 487/520 [30:51<02:02, 3.71s/it] {'loss': 1.1116, 'grad_norm': 0.00193638070740468, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 94%|█████████▍| 488/520 [30:55<01:58, 3.70s/it] {'loss': 1.0527, 'grad_norm': 0.0019569804175627666, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 94%|█████████▍| 489/520 [30:58<01:54, 3.69s/it] {'loss': 1.2393, 'grad_norm': 0.0016854433783348468, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 94%|█████████▍| 490/520 [31:02<01:51, 3.73s/it] {'loss': 1.176, 'grad_norm': 0.002094498204980067, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 94%|█████████▍| 491/520 [31:06<01:49, 3.77s/it] {'loss': 1.134, 'grad_norm': 0.0019958288955690393, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 95%|█████████▍| 492/520 [31:10<01:46, 3.79s/it] {'loss': 1.2579, 'grad_norm': 0.0020644154554469336, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 95%|█████████▍| 493/520 [31:14<01:42, 3.80s/it] {'loss': 1.2751, 'grad_norm': 0.0020158058676358224, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 95%|█████████▌| 494/520 [31:17<01:38, 3.78s/it] {'loss': 1.1936, 'grad_norm': 0.0017834118093462937, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 95%|█████████▌| 495/520 [31:21<01:33, 3.75s/it] {'loss': 1.1555, 'grad_norm': 0.0019210345410579427, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 95%|█████████▌| 496/520 [31:25<01:29, 3.73s/it] {'loss': 1.0758, 'grad_norm': 0.002122131403232861, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 96%|█████████▌| 497/520 [31:28<01:25, 3.71s/it] {'loss': 1.1753, 'grad_norm': 0.001693085587225952, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 96%|█████████▌| 498/520 [31:32<01:21, 3.69s/it] {'loss': 1.1564, 'grad_norm': 0.0020316323508011796, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 96%|█████████▌| 499/520 [31:36<01:17, 3.71s/it] {'loss': 1.3103, 'grad_norm': 0.0019284519451774763, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 96%|█████████▌| 500/520 [31:40<01:13, 3.69s/it] {'loss': 1.2719, 'grad_norm': 0.0022886696650214534, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 96%|█████████▋| 501/520 [31:43<01:10, 3.70s/it] {'loss': 1.2725, 'grad_norm': 0.0029148885210003024, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 97%|█████████▋| 502/520 [31:47<01:06, 3.70s/it] {'loss': 1.1958, 'grad_norm': 0.0018244804098428895, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 97%|█████████▋| 503/520 [31:51<01:02, 3.69s/it] {'loss': 1.208, 'grad_norm': 0.002026075289558693, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 97%|█████████▋| 504/520 [31:54<00:59, 3.69s/it] {'loss': 1.1989, 'grad_norm': 0.0023813948069161175, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 97%|█████████▋| 505/520 [31:58<00:55, 3.69s/it] {'loss': 1.2272, 'grad_norm': 0.0019297467593400353, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 97%|█████████▋| 506/520 [32:02<00:51, 3.69s/it] {'loss': 1.1502, 'grad_norm': 0.00210846394465334, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 98%|█████████▊| 507/520 [32:05<00:48, 3.70s/it] {'loss': 1.3511, 'grad_norm': 0.0018533900781651704, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 98%|█████████▊| 508/520 [32:09<00:44, 3.71s/it] {'loss': 1.2665, 'grad_norm': 0.0020279499201429833, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 98%|█████████▊| 509/520 [32:13<00:40, 3.70s/it] {'loss': 1.231, 'grad_norm': 0.001902847229158596, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 98%|█████████▊| 510/520 [32:17<00:37, 3.70s/it] {'loss': 1.1831, 'grad_norm': 0.0019081305183014125, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 98%|█████████▊| 511/520 [32:20<00:33, 3.68s/it] {'loss': 1.1656, 'grad_norm': 0.0018751109237395587, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 98%|█████████▊| 512/520 [32:24<00:29, 3.69s/it] {'loss': 1.0425, 'grad_norm': 0.00197716988980364, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 99%|█████████▊| 513/520 [32:28<00:25, 3.69s/it] {'loss': 1.2516, 'grad_norm': 0.002164337405061656, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 99%|█████████▉| 514/520 [32:31<00:22, 3.69s/it] {'loss': 1.2185, 'grad_norm': 0.0018316589085423701, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 99%|█████████▉| 515/520 [32:35<00:18, 3.73s/it] {'loss': 1.2656, 'grad_norm': 0.002288060154745, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:39<00:15, 3.81s/it] {'loss': 1.1545, 'grad_norm': 0.0019053323904472228, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:43<00:11, 3.83s/it] {'loss': 1.2636, 'grad_norm': 0.0020311195989709437, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 100%|█████████▉| 518/520 [32:47<00:07, 3.86s/it] {'loss': 1.1804, 'grad_norm': 0.0021717077177459197, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 100%|█████████▉| 519/520 [32:51<00:03, 3.87s/it] {'loss': 1.2172, 'grad_norm': 0.0019249217260815146, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:56<00:00, 4.14s/it] {'loss': 1.2572, 'grad_norm': 0.002082258029208718, 'learning_rate': 0.0, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:56<00:00, 4.14s/it] {'train_runtime': 1976.0293, 'train_samples_per_second': 33.668, 'train_steps_per_second': 0.263, 'train_loss': 1.3338912826318008, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:56<00:00, 3.80s/it]
+[2025-10-13 15:47:31,636] [INFO] [launch.py:348:main] Process 848853 exits successfully.
+[2025-10-13 15:47:31,636] [INFO] [launch.py:348:main] Process 848855 exits successfully.
+[2025-10-13 15:47:32,638] [INFO] [launch.py:348:main] Process 848856 exits successfully.
+[2025-10-13 15:47:32,638] [INFO] [launch.py:348:main] Process 848854 exits successfully.
+[2025-10-13 15:47:32,639] [INFO] [launch.py:348:main] Process 848852 exits successfully.
+[2025-10-13 15:47:33,640] [INFO] [launch.py:348:main] Process 848857 exits successfully.
+[2025-10-13 15:47:33,641] [INFO] [launch.py:348:main] Process 848851 exits successfully.
+[2025-10-13 15:47:36,644] [INFO] [launch.py:348:main] Process 848850 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.7_2e-1_connector-3.0_2.7_2e-1_ablation_20251013_151303.log
+Timestamp: 2025-10-13 15:47:39
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation_20251013_154739.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation_20251013_154739.log
new file mode 100644
index 0000000000000000000000000000000000000000..22c1a9405d48faa567aa719e7752088abf9e1dbd
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation_20251013_154739.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation_20251013_154739.log
+Timestamp: 2025-10-13 15:47:39
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 15:47:41,938] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:44,913] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 15:47:44,914] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 2.9 --temperature_mlp_text 2.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 2.9 --temperature_mlp_vision 2.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 2.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 15:47:47,501] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:48,558] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 15:47:48,558] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 15:47:48,558] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 15:47:48,558] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 15:47:48,558] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 15:47:48,558] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 15:47:48,558] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 15:47:48,560] [INFO] [launch.py:253:main] process 868622 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', ...] (the remaining arguments repeat the training flags from the runner cmd above, tokenized into list form)
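The --world_info blob in the cmd line above is just base64-encoded JSON describing the node layout; decoding it reproduces the WORLD INFO DICT printed by the launcher. A two-line check using only the standard library:

import base64, json

blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.urlsafe_b64decode(blob)))  # {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}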
+[2025-10-13 15:47:48,562] [INFO] [launch.py:253:main] process 868623 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', ...]
+[2025-10-13 15:47:48,565] [INFO] [launch.py:253:main] process 868624 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', ...]
+[2025-10-13 15:47:48,567] [INFO] [launch.py:253:main] process 868625 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', ...]
+[2025-10-13 15:47:48,569] [INFO] [launch.py:253:main] process 868626 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', ...]
+[2025-10-13 15:47:48,571] [INFO] [launch.py:253:main] process 868627 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', ...]
+[2025-10-13 15:47:48,573] [INFO] [launch.py:253:main] process 868628 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', ...]
+[2025-10-13 15:47:48,575] [INFO] [launch.py:253:main] process 868629 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', ...]
(apart from --local_rank, each spawn command repeats the rank-0 argument list verbatim)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
(the pynvml warning above is emitted once by each of the eight spawned ranks; the repeats are omitted)
+[2025-10-13 15:47:55,176] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,440] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,495] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,499] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,521] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,531] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,574] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,574] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 15:47:55,582] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,841] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,894] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,897] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,913] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,919] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,979] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 15:47:55,979] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 15:47:55,979] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
(the "Apply masks" line and the resume_download warning repeat once per rank; the further repeats are omitted)
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.9, 'temperature_mlp': 2.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.9, 'mask_type': 'soft', 'backward_type': 'normal'}}
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 2.9,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 2.9,
+    "temperature_mlp": 2.9,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
(the "Special tokens", TypedStorage, and Flash Attention lines above repeat once per rank; the further repeats are omitted)
+ywang29-vrdb-test1-worker-0:868622:868622 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:868622:868622 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:868622:868622 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:868622:868622 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:868622:868622 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:868622:868622 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
(the equivalent bootstrap and plugin lines for ranks 1 and 7 are omitted)
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Using network Socket
(the same NET/IB, NET/Socket, and "Using network Socket" lines repeat for ranks 1 and 7)
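"NET/IB : No device found" means NCCL registers the plain socket transport over eth0; on this single node the actual payload paths end up as intra-node P2P (see the "via P2P/CUMEM/read" channel lines further down), so the socket net mostly matters for bootstrap. A minimal, hypothetical smoke test (not part of this repo) to confirm collectives work with this setup, launched as: torchrun --nproc_per_node=8 nccl_smoke_test.py

import os
import torch
import torch.distributed as dist

def main():
    dist.init_process_group(backend="nccl")            # same backend DeepSpeed initializes above
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
    t = torch.ones(1, device="cuda")
    dist.all_reduce(t)                                 # default op: sum across all ranks
    assert t.item() == dist.get_world_size()           # 8.0 on this single node
    dist.destroy_process_group()

if __name__ == "__main__":
    main()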
(rank 7's NET/Socket lines, further per-rank Flash Attention warnings, and the bootstrap/plugin lines for ranks 4, 6, and 2 are omitted; they repeat the patterns above)
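The repeated Flash Attention warning is benign at load time: the FA2 kernels only run on CUDA, so a model built on CPU must be moved to the GPU before its first forward. A generic transformers sketch of the pattern the warning points at (an assumed illustration, not the experiment's actual loading code):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,               # matches --bf16 True
    attn_implementation="flash_attention_2",  # matches --attn_implementation
)
model = model.to("cuda")  # moving the weights to GPU before use is what the warning asks for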
(the bootstrap/plugin lines for ranks 5 and 3, and the NET/IB and NET/Socket lines for the remaining ranks, are likewise omitted)
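The "Setting affinity for GPU n to ..." lines just below print the CPU set NCCL pins each rank to, as comma-separated 32-bit hex words. A small decoder sketch (the mask string is copied from the log; the helper name is ours):

def decode_affinity(mask: str) -> list[int]:
    # concatenate the 32-bit words and enumerate the set bits, lowest CPU first
    bits = int(mask.replace(",", ""), 16)
    return [cpu for cpu in range(bits.bit_length()) if (bits >> cpu) & 1]

cpus = decode_affinity("ff,ffff0000,00ffffff")  # mask used by GPUs 0-3 on this box
print(len(cpus), cpus[0], cpus[-1])             # 48 0 71 -> CPUs 0-23 and 48-71, i.e. one NUMA domain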
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO ncclCommInitRank comm 0x55b918e05c40 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x6dc510497947c4c6 - Init START
(the matching "ncclCommInitRank ... Init START" lines for ranks 1-7, each with its own comm pointer and busId, are omitted)
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
(GPUs 1-3 share GPU 0's affinity mask and GPUs 5-7 share GPU 4's; the per-GPU "NVLS multicast support is not available" lines are omitted)
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO comm 0x55b918e05c40 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
(the equivalent comm summary lines for ranks 1-7 are omitted)
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
(Channels 01/24 through 23/24 list the same ring order and are omitted)
+ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 (the same tree repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 (repeats for channels [1]-[23])
+ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO P2P Chunksize set to 524288
(each rank sets the same P2P chunk size; the remaining seven lines are omitted)
+ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
(each rank then opens successive channels to its ring neighbor via P2P/CUMEM/read; the remaining per-channel connection lines are omitted)
+ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868625:870243 [3] 
NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868625:870243 [3] NCCL INFO ncclCommInitRank comm 0x564b20a0bdc0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868627:870242 [5] NCCL INFO ncclCommInitRank comm 0x55e35b2041e0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868629:870238 [7] NCCL INFO ncclCommInitRank comm 0x55623a3b8630 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868626:870239 [4] NCCL INFO ncclCommInitRank comm 0x55c371f1c210 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868628:870240 [6] NCCL INFO ncclCommInitRank comm 0x55eb835ec960 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:868624:870241 [2] NCCL INFO ncclCommInitRank comm 0x56247458bb90 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868622:870236 [0] NCCL INFO ncclCommInitRank comm 0x55b918e05c40 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x6dc510497947c4c6 - Init COMPLETE +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:868623:870237 [1] NCCL INFO ncclCommInitRank comm 0x562ac040c9b0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x6dc510497947c4c6 - Init COMPLETE +[2025-10-13 15:48:40,401] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores',
'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 
'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 
'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-13 15:48:42,162] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 15:49:00,090 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 15:49:00,095 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:868629:875276 [7] NCCL INFO ncclCommInitRank comm 0x7fd8e806a910 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x62d0f02733b6ae4a - Init COMPLETE +ywang29-vrdb-test1-worker-0:868625:875273 [3] NCCL INFO ncclCommInitRank comm 0x7efb2806b050 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x62d0f02733b6ae4a - Init COMPLETE +ywang29-vrdb-test1-worker-0:868627:875277 [5] NCCL INFO ncclCommInitRank comm 0x7f205806b460 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x62d0f02733b6ae4a - Init COMPLETE +ywang29-vrdb-test1-worker-0:868623:875275 [1] NCCL INFO ncclCommInitRank comm 0x7fb88c06b060 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x62d0f02733b6ae4a - Init COMPLETE +ywang29-vrdb-test1-worker-0:868628:875274 [6] NCCL INFO ncclCommInitRank comm 0x7facac06b860 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x62d0f02733b6ae4a - Init COMPLETE +ywang29-vrdb-test1-worker-0:868626:875278 [4] NCCL INFO ncclCommInitRank comm 0x7fd3b406af10 rank 4 nranks 8 cudaDev 4 nvmlDev 4 
busId 901c0 commId 0x62d0f02733b6ae4a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:868622:875270 [0] NCCL INFO ncclCommInitRank comm 0x7fa99406ab80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x62d0f02733b6ae4a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:868624:875272 [2] NCCL INFO ncclCommInitRank comm 0x7f055806adc0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x62d0f02733b6ae4a - Init COMPLETE
+ 0%| | 1/520 [00:14<2:02:16, 14.14s/it] {'loss': 5.6236, 'grad_norm': 0.15452479303818728, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520 [00:14<2:02:16, 14.14s/it] 0%| | 2/520 [00:17<1:09:03, 8.00s/it] {'loss': 5.0585, 'grad_norm': 0.1413044622314903, 'learning_rate': 0.025, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:09:03, 8.00s/it] 1%| | 3/520 [00:21<51:57, 6.03s/it] {'loss': 3.6987, 'grad_norm': 0.10108199717076055, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 3/520 [00:21<51:57, 6.03s/it] 1%| | 4/520 [00:25<43:45, 5.09s/it] {'loss': 2.5953, 'grad_norm': 0.044548715466884374, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<43:45, 5.09s/it] 1%| | 5/520 [00:28<39:15, 4.57s/it] {'loss': 2.2404, 'grad_norm': 0.028226089871719397, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:15, 4.57s/it] 1%| | 6/520 [00:32<36:32, 4.27s/it] {'loss': 2.1228, 'grad_norm': 0.024994240559436658, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:32, 4.27s/it] 1%|▏ | 7/520 [00:36<35:14, 4.12s/it] {'loss': 1.8605, 'grad_norm': 0.0204465609089972, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:36<35:14, 4.12s/it] 2%|▏ | 8/520 [00:40<35:40, 4.18s/it] {'loss': 1.792, 'grad_norm': 0.009899491751428988, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 8/520 [00:40<35:40, 4.18s/it] 2%|▏ | 9/520 [00:44<35:55, 4.22s/it] {'loss': 1.8248, 'grad_norm': 0.013454446697606696, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<35:55, 4.22s/it] 2%|▏ | 10/520 [00:48<34:53, 4.11s/it] {'loss': 1.5635, 'grad_norm': 0.006578525789384509, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<34:53, 4.11s/it] 2%|▏ | 11/520 [00:52<34:20, 4.05s/it] {'loss': 1.6557, 'grad_norm': 0.01095652527940686, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:52<34:20, 4.05s/it] 2%|▏ | 12/520 [00:56<33:09, 3.92s/it] {'loss': 1.6465, 'grad_norm': 0.007623923642767416, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:56<33:09, 3.92s/it][2025-10-13 15:50:05,272] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
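The stage3 warning above ends with a concrete remedy: call get_accelerator().empty_cache() at a fixed point in the training loop so that every rank flushes its allocator cache at the same step. A minimal sketch of that remedy, assuming a DeepSpeed engine named `model_engine`, a `train_loader`, and an arbitrary flush interval (all hypothetical names, not taken from this run):

    # Sketch only: synchronized allocator-cache flushes, per the warning above.
    # `model_engine`, `train_loader`, and EMPTY_CACHE_EVERY are assumed names.
    from deepspeed.accelerator import get_accelerator

    EMPTY_CACHE_EVERY = 50  # flush interval; tune to the workload

    for step, batch in enumerate(train_loader):
        loss = model_engine(batch)   # forward pass
        model_engine.backward(loss)  # backward pass
        model_engine.step()          # optimizer step
        if step % EMPTY_CACHE_EVERY == 0:
            # Every rank reaches this branch at the same step, so cache
            # flushes happen together instead of ad hoc under memory pressure.
            get_accelerator().empty_cache()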
+ 2%|▎ | 13/520 [01:00<34:17, 4.06s/it] {'loss': 1.579, 'grad_norm': 0.0062533049189566915, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [01:00<34:17, 4.06s/it] 3%|▎ | 14/520 [01:04<33:10, 3.93s/it] {'loss': 1.6121, 'grad_norm': 0.00698590911522959, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:04<33:10, 3.93s/it] 3%|▎ | 15/520 [01:07<32:17, 3.84s/it] {'loss': 1.6581, 'grad_norm': 0.007114078083865904, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:17, 3.84s/it] 3%|▎ | 16/520 [01:11<31:38, 3.77s/it] {'loss': 1.5842, 'grad_norm': 0.005799795830154064, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:11<31:38, 3.77s/it] 3%|▎ | 17/520 [01:15<31:25, 3.75s/it] {'loss': 1.6569, 'grad_norm': 0.005541230492116907, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:15<31:25, 3.75s/it] 3%|▎ | 18/520 [01:19<31:33, 3.77s/it] {'loss': 1.4687, 'grad_norm': 0.0042619661538970555, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:19<31:33, 3.77s/it] 4%|▎ | 19/520 [01:22<31:40, 3.79s/it] {'loss': 1.6157, 'grad_norm': 0.005000795149828259, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:22<31:40, 3.79s/it] 4%|▍ | 20/520 [01:26<31:32, 3.78s/it] {'loss': 1.4696, 'grad_norm': 0.004901907789037012, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:26<31:32, 3.78s/it] 4%|▍ | 21/520 [01:30<31:09, 3.75s/it] {'loss': 1.6157, 'grad_norm': 0.00491371763192352, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:30<31:09, 3.75s/it] 4%|▍ | 22/520 [01:34<30:49, 3.71s/it] {'loss': 1.6151, 'grad_norm': 0.004136605906914878, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:34<30:49, 3.71s/it] 4%|▍ | 23/520 [01:37<30:30, 3.68s/it] {'loss': 1.5437, 'grad_norm': 0.003718742362934995, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:37<30:30, 3.68s/it] 5%|▍ | 24/520 [01:41<30:19, 3.67s/it] {'loss': 1.5485, 'grad_norm': 0.005214751907853641, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 24/520 [01:41<30:19, 3.67s/it] 5%|▍ | 25/520 [01:44<30:09, 3.66s/it] {'loss': 1.5586, 'grad_norm': 0.00393885975927914, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:44<30:09, 3.66s/it] 5%|▌ | 26/520 [01:48<30:02, 3.65s/it] {'loss': 1.53, 'grad_norm': 0.0038625189084016444, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:48<30:02, 3.65s/it] 5%|▌ | 27/520 [01:52<30:25, 3.70s/it] {'loss': 1.4389, 'grad_norm': 0.003667550653623007, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:52<30:25, 3.70s/it] 5%|▌ | 28/520 [01:56<30:43, 3.75s/it] {'loss': 1.4317, 'grad_norm': 0.0038697149838945104, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:56<30:43, 3.75s/it] 6%|▌ | 29/520 [02:00<30:53, 3.77s/it] {'loss': 1.443, 'grad_norm': 0.00329285405687499, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 29/520 [02:00<30:53, 3.77s/it] 6%|▌ | 30/520 [02:03<30:58, 3.79s/it] {'loss': 1.6287, 'grad_norm': 0.0037002698514502492, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:03<30:58, 3.79s/it] 6%|▌ | 31/520 [02:07<30:58, 3.80s/it] {'loss': 1.4353, 'grad_norm': 0.003056430704362983, 'learning_rate':
0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<30:58, 3.80s/it] 6%|▌ | 32/520 [02:11<31:02, 3.82s/it] {'loss': 1.6319, 'grad_norm': 0.007819818263736997, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:11<31:02, 3.82s/it] 6%|▋ | 33/520 [02:15<30:59, 3.82s/it] {'loss': 1.4453, 'grad_norm': 0.0035627086649264858, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:15<30:59, 3.82s/it] 7%|▋ | 34/520 [02:19<30:55, 3.82s/it] {'loss': 1.4302, 'grad_norm': 0.004613941208659341, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:19<30:55, 3.82s/it] 7%|▋ | 35/520 [02:23<30:54, 3.82s/it] {'loss': 1.4561, 'grad_norm': 0.003946082397332782, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:23<30:54, 3.82s/it] 7%|▋ | 36/520 [02:26<30:46, 3.81s/it] {'loss': 1.5614, 'grad_norm': 0.003291039266506155, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:26<30:46, 3.81s/it] 7%|▋ | 37/520 [02:30<30:21, 3.77s/it] {'loss': 1.636, 'grad_norm': 0.006074580590823786, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:30<30:21, 3.77s/it] 7%|▋ | 38/520 [02:34<30:00, 3.74s/it] {'loss': 1.6387, 'grad_norm': 0.0055886492928382, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:34<30:00, 3.74s/it] 8%|▊ | 39/520 [02:37<29:50, 3.72s/it] {'loss': 1.4665, 'grad_norm': 0.0032749555978377268, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:37<29:50, 3.72s/it] 8%|▊ | 40/520 [02:41<29:53, 3.74s/it] {'loss': 1.5088, 'grad_norm': 0.005160911415083427, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:41<29:53, 3.74s/it] 8%|▊ | 41/520 [02:45<29:38, 3.71s/it] {'loss': 1.4713, 'grad_norm': 0.0033525478249802035, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:45<29:38, 3.71s/it] 8%|▊ | 42/520 [02:48<29:24, 3.69s/it] {'loss': 1.5043, 'grad_norm': 0.005286654990087345, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:48<29:24, 3.69s/it] 8%|▊ | 43/520 [02:52<29:17, 3.68s/it] {'loss': 1.4897, 'grad_norm': 0.005274335633903479, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:52<29:17, 3.68s/it] 8%|▊ | 44/520 [02:56<29:06, 3.67s/it] {'loss': 1.6006, 'grad_norm': 0.00407531379676451, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:56<29:06, 3.67s/it] 9%|▊ | 45/520 [02:59<29:09, 3.68s/it] {'loss': 1.5034, 'grad_norm': 0.005540633868062694, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:59<29:09, 3.68s/it] 9%|▉ | 46/520 [03:03<28:59, 3.67s/it] {'loss': 1.6552, 'grad_norm': 0.00349194005583129, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:03<28:59, 3.67s/it] 9%|▉ | 47/520 [03:07<28:54, 3.67s/it] {'loss': 1.4856, 'grad_norm': 0.005436431386463045, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:07<28:54, 3.67s/it] 9%|▉ | 48/520 [03:10<28:45, 3.66s/it] {'loss': 1.4592, 'grad_norm': 0.0033404429424705897, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:10<28:45, 3.66s/it] 9%|▉ | 49/520 [03:14<28:37, 3.65s/it] {'loss': 1.4884, 'grad_norm': 0.0032094386524682606, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:14<28:37, 3.65s/it] 10%|▉ | 50/520 [03:18<28:29, 3.64s/it] {'loss': 1.4782, 'grad_norm': 0.003893380282219322, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:18<28:29, 3.64s/it] 
10%|▉ | 51/520 [03:21<28:48, 3.69s/it] {'loss': 1.4013, 'grad_norm': 0.0029098964095952322, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:21<28:48, 3.69s/it] 10%|█ | 52/520 [03:25<29:05, 3.73s/it] {'loss': 1.5481, 'grad_norm': 0.005699831193994952, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:25<29:05, 3.73s/it] 10%|█ | 53/520 [03:29<29:28, 3.79s/it] {'loss': 1.5319, 'grad_norm': 0.003274649628252116, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:29<29:28, 3.79s/it] 10%|█ | 54/520 [03:33<29:29, 3.80s/it] {'loss': 1.4263, 'grad_norm': 0.004025917787536399, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<29:29, 3.80s/it] 11%|█ | 55/520 [03:37<29:30, 3.81s/it] {'loss': 1.4044, 'grad_norm': 0.004024742711538445, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<29:30, 3.81s/it] 11%|█ | 56/520 [03:41<29:19, 3.79s/it] {'loss': 1.5355, 'grad_norm': 0.003049003739311315, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:41<29:19, 3.79s/it] 11%|█ | 57/520 [03:44<29:01, 3.76s/it] {'loss': 1.4109, 'grad_norm': 0.005697645207294023, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:44<29:01, 3.76s/it] 11%|█ | 58/520 [03:48<28:54, 3.75s/it] {'loss': 1.5576, 'grad_norm': 0.003557637595783364, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<28:54, 3.75s/it] 11%|█▏ | 59/520 [03:52<28:41, 3.74s/it] {'loss': 1.4157, 'grad_norm': 0.0031954541265582018, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<28:41, 3.74s/it] 12%|█▏ | 60/520 [03:56<28:54, 3.77s/it] {'loss': 1.4883, 'grad_norm': 0.005952657437127828, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:56<28:54, 3.77s/it] 12%|█▏ | 61/520 [03:59<28:44, 3.76s/it] {'loss': 1.5833, 'grad_norm': 0.004826537857798294, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:59<28:44, 3.76s/it] 12%|█▏ | 62/520 [04:03<28:20, 3.71s/it] {'loss': 1.4449, 'grad_norm': 0.00377445226486966, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:20, 3.71s/it] 12%|█▏ | 63/520 [04:07<28:11, 3.70s/it] {'loss': 1.449, 'grad_norm': 0.004751857057431255, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:07<28:11, 3.70s/it] 12%|█▏ | 64/520 [04:10<28:03, 3.69s/it] {'loss': 1.473, 'grad_norm': 0.0030976685519822755, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:10<28:03, 3.69s/it] 12%|█▎ | 65/520 [04:14<27:56, 3.68s/it] {'loss': 1.4754, 'grad_norm': 0.003292862384726622, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:14<27:56, 3.68s/it] 13%|█▎ | 66/520 [04:18<27:56, 3.69s/it] {'loss': 1.4395, 'grad_norm': 0.0048363482568862266, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:18<27:56, 3.69s/it] 13%|█▎ | 67/520 [04:21<28:20, 3.75s/it] {'loss': 1.3246, 'grad_norm': 0.003140950828133961, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:21<28:20, 3.75s/it] 13%|█▎ | 68/520 [04:25<28:31, 3.79s/it] {'loss': 1.3709, 'grad_norm': 0.002510325002185883, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:25<28:31, 3.79s/it] 13%|█▎ | 69/520 [04:29<28:30, 3.79s/it] {'loss': 1.3729, 'grad_norm': 0.004859983224847915, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:29<28:30, 3.79s/it] 13%|█▎ | 70/520 
[04:33<28:07, 3.75s/it] {'loss': 1.4065, 'grad_norm': 0.0034759301407595415, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:33<28:07, 3.75s/it] 14%|█▎ | 71/520 [04:36<27:52, 3.73s/it] {'loss': 1.3297, 'grad_norm': 0.0026392885772046526, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:36<27:52, 3.73s/it] 14%|█▍ | 72/520 [04:40<27:41, 3.71s/it] {'loss': 1.4826, 'grad_norm': 0.004015418937782838, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:40<27:41, 3.71s/it] 14%|█▍ | 73/520 [04:44<27:32, 3.70s/it] {'loss': 1.3192, 'grad_norm': 0.0029517439582468254, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:44<27:32, 3.70s/it] 14%|█▍ | 74/520 [04:47<27:22, 3.68s/it] {'loss': 1.43, 'grad_norm': 0.0026554375215551997, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:47<27:22, 3.68s/it] 14%|█▍ | 75/520 [04:51<27:19, 3.68s/it] {'loss': 1.3308, 'grad_norm': 0.003130803825891688, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:51<27:19, 3.68s/it] 15%|█▍ | 76/520 [04:55<27:12, 3.68s/it] {'loss': 1.6201, 'grad_norm': 0.004678884556421238, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:55<27:12, 3.68s/it] 15%|█▍ | 77/520 [04:58<27:10, 3.68s/it] {'loss': 1.261, 'grad_norm': 0.002970646568178077, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:58<27:10, 3.68s/it] 15%|█▌ | 78/520 [05:02<27:03, 3.67s/it] {'loss': 1.382, 'grad_norm': 0.0030091976338405438, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:02<27:03, 3.67s/it] 15%|█▌ | 79/520 [05:06<27:00, 3.68s/it] {'loss': 1.3612, 'grad_norm': 0.0025302513789907774, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:06<27:00, 3.68s/it] 15%|█▌ | 80/520 [05:10<27:01, 3.68s/it] {'loss': 1.6761, 'grad_norm': 0.00642170377959006, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:10<27:01, 3.68s/it] 16%|█▌ | 81/520 [05:13<26:53, 3.67s/it] {'loss': 1.5046, 'grad_norm': 0.00374225149824158, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:13<26:53, 3.67s/it] 16%|█▌ | 82/520 [05:17<26:46, 3.67s/it] {'loss': 1.4356, 'grad_norm': 0.0026035693412160676, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:17<26:46, 3.67s/it] 16%|█▌ | 83/520 [05:20<26:37, 3.66s/it] {'loss': 1.4534, 'grad_norm': 0.002749462472787305, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:20<26:37, 3.66s/it] 16%|█▌ | 84/520 [05:24<26:30, 3.65s/it] {'loss': 1.4714, 'grad_norm': 0.0036047044052487692, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:24<26:30, 3.65s/it] 16%|█▋ | 85/520 [05:28<26:22, 3.64s/it] {'loss': 1.4809, 'grad_norm': 0.002496255678539977, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:28<26:22, 3.64s/it] 17%|█▋ | 86/520 [05:31<26:19, 3.64s/it] {'loss': 1.5023, 'grad_norm': 0.003119151112244778, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:31<26:19, 3.64s/it] 17%|█▋ | 87/520 [05:35<26:21, 3.65s/it] {'loss': 1.6083, 'grad_norm': 0.0048660016587487205, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:35<26:21, 3.65s/it] 17%|█▋ | 88/520 [05:39<26:50, 3.73s/it] {'loss': 1.5579, 'grad_norm': 0.004274713044052087, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:39<26:50, 3.73s/it] 17%|█▋ | 89/520 
[05:43<27:00, 3.76s/it] {'loss': 1.4501, 'grad_norm': 0.0030346321503461906, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:43<27:00, 3.76s/it] 17%|█▋ | 90/520 [05:47<26:58, 3.76s/it] {'loss': 1.3841, 'grad_norm': 0.002772417213891232, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:47<26:58, 3.76s/it] 18%|█▊ | 91/520 [05:50<26:38, 3.73s/it] {'loss': 1.4457, 'grad_norm': 0.0025346704025763886, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:50<26:38, 3.73s/it] 18%|█▊ | 92/520 [05:54<26:24, 3.70s/it] {'loss': 1.3884, 'grad_norm': 0.002731562495002805, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:54<26:24, 3.70s/it] 18%|█▊ | 93/520 [05:58<26:23, 3.71s/it] {'loss': 1.4018, 'grad_norm': 0.0030383284304297416, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:58<26:23, 3.71s/it] 18%|█▊ | 94/520 [06:01<26:20, 3.71s/it] {'loss': 1.4989, 'grad_norm': 0.0030638701739593337, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:01<26:20, 3.71s/it] 18%|█▊ | 95/520 [06:05<26:17, 3.71s/it] {'loss': 1.382, 'grad_norm': 0.002997086777887834, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:05<26:17, 3.71s/it] 18%|█▊ | 96/520 [06:09<26:11, 3.71s/it] {'loss': 1.3885, 'grad_norm': 0.002259670538574872, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:09<26:11, 3.71s/it] 19%|█▊ | 97/520 [06:12<26:08, 3.71s/it] {'loss': 1.3678, 'grad_norm': 0.003315492784107424, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:12<26:08, 3.71s/it] 19%|█▉ | 98/520 [06:16<26:08, 3.72s/it] {'loss': 1.3528, 'grad_norm': 0.0022016741667214438, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:16<26:08, 3.72s/it] 19%|█▉ | 99/520 [06:20<26:22, 3.76s/it] {'loss': 1.374, 'grad_norm': 0.0024612325528783814, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:20<26:22, 3.76s/it] 19%|█▉ | 100/520 [06:24<26:21, 3.77s/it] {'loss': 1.521, 'grad_norm': 0.004001121092809995, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:24<26:21, 3.77s/it] 19%|█▉ | 101/520 [06:28<26:17, 3.77s/it] {'loss': 1.375, 'grad_norm': 0.0026289262357635066, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:28<26:17, 3.77s/it] 20%|█▉ | 102/520 [06:31<26:14, 3.77s/it] {'loss': 1.3854, 'grad_norm': 0.002683842817822291, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:31<26:14, 3.77s/it] 20%|█▉ | 103/520 [06:35<26:10, 3.77s/it] {'loss': 1.3134, 'grad_norm': 0.002232626203705311, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:35<26:10, 3.77s/it] 20%|██ | 104/520 [06:39<26:03, 3.76s/it] {'loss': 1.3924, 'grad_norm': 0.0026713393298052534, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:39<26:03, 3.76s/it] 20%|██ | 105/520 [06:43<25:59, 3.76s/it] {'loss': 1.3804, 'grad_norm': 0.0023147149873006846, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:43<25:59, 3.76s/it] 20%|██ | 106/520 [06:46<25:55, 3.76s/it] {'loss': 1.4717, 'grad_norm': 0.002675785082895147, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:46<25:55, 3.76s/it] 21%|██ | 107/520 [06:50<25:48, 3.75s/it] {'loss': 1.4514, 'grad_norm': 0.002876096561295436, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:50<25:48, 3.75s/it] 
21%|██ | 108/520 [06:54<25:40, 3.74s/it] {'loss': 1.3309, 'grad_norm': 0.002515053392222735, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:54<25:40, 3.74s/it] 21%|██ | 109/520 [06:57<25:32, 3.73s/it] {'loss': 1.486, 'grad_norm': 0.005090590055472242, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:57<25:32, 3.73s/it] 21%|██ | 110/520 [07:01<25:27, 3.73s/it] {'loss': 1.5305, 'grad_norm': 0.0024178396277795187, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:01<25:27, 3.73s/it] 21%|██▏ | 111/520 [07:05<25:19, 3.71s/it] {'loss': 1.5333, 'grad_norm': 0.0025960769076918138, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:05<25:19, 3.71s/it] 22%|██▏ | 112/520 [07:09<25:11, 3.71s/it] {'loss': 1.4121, 'grad_norm': 0.002249636187585578, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:09<25:11, 3.71s/it] 22%|██▏ | 113/520 [07:12<25:08, 3.71s/it] {'loss': 1.2824, 'grad_norm': 0.0021904762772492667, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:12<25:08, 3.71s/it] 22%|██▏ | 114/520 [07:16<25:03, 3.70s/it] {'loss': 1.385, 'grad_norm': 0.002290248347454283, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:16<25:03, 3.70s/it] 22%|██▏ | 115/520 [07:20<25:05, 3.72s/it] {'loss': 1.5022, 'grad_norm': 0.0022165247827363147, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:20<25:05, 3.72s/it] 22%|██▏ | 116/520 [07:23<24:58, 3.71s/it] {'loss': 1.5007, 'grad_norm': 0.002079253578370548, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:23<24:58, 3.71s/it] 22%|██▎ | 117/520 [07:27<24:57, 3.72s/it] {'loss': 1.4779, 'grad_norm': 0.0024950707754145323, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:27<24:57, 3.72s/it] 23%|██▎ | 118/520 [07:31<24:53, 3.72s/it] {'loss': 1.3639, 'grad_norm': 0.0021460737514296248, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:31<24:53, 3.72s/it] 23%|██▎ | 119/520 [07:35<24:49, 3.71s/it] {'loss': 1.3186, 'grad_norm': 0.0022474970503108525, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:35<24:49, 3.71s/it] 23%|██▎ | 120/520 [07:38<24:45, 3.71s/it] {'loss': 1.3487, 'grad_norm': 0.002620812057555152, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:38<24:45, 3.71s/it] 23%|██▎ | 121/520 [07:42<24:42, 3.72s/it] {'loss': 1.4022, 'grad_norm': 0.0027230994730574614, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:42<24:42, 3.72s/it] 23%|██▎ | 122/520 [07:46<24:36, 3.71s/it] {'loss': 1.2935, 'grad_norm': 0.0022467214735560352, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:46<24:36, 3.71s/it] 24%|██▎ | 123/520 [07:49<24:27, 3.70s/it] {'loss': 1.5085, 'grad_norm': 0.003334289551182738, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:49<24:27, 3.70s/it] 24%|██▍ | 124/520 [07:53<24:19, 3.69s/it] {'loss': 1.3752, 'grad_norm': 0.0026366258802446793, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:53<24:19, 3.69s/it] 24%|██▍ | 125/520 [07:57<24:18, 3.69s/it] {'loss': 1.3631, 'grad_norm': 0.002484936779288082, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:57<24:18, 3.69s/it] 24%|██▍ | 126/520 [08:01<25:31, 3.89s/it] {'loss': 1.42, 'grad_norm': 0.0021587682144999566, 'learning_rate': 
0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:01<25:31, 3.89s/it] 24%|██▍ | 127/520 [08:05<25:05, 3.83s/it] {'loss': 1.3445, 'grad_norm': 0.0031272638983157602, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:05<25:05, 3.83s/it] 25%|██▍ | 128/520 [08:08<24:46, 3.79s/it] {'loss': 1.3942, 'grad_norm': 0.0024023075665536085, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:08<24:46, 3.79s/it] 25%|██▍ | 129/520 [08:12<24:25, 3.75s/it] {'loss': 1.3129, 'grad_norm': 0.0020977208011993894, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:12<24:25, 3.75s/it] 25%|██▌ | 130/520 [08:16<24:10, 3.72s/it] {'loss': 1.3758, 'grad_norm': 0.0024383886956549894, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:16<24:10, 3.72s/it] 25%|██▌ | 131/520 [08:19<23:57, 3.69s/it] {'loss': 1.3764, 'grad_norm': 0.002523839775098716, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:19<23:57, 3.69s/it] 25%|██▌ | 132/520 [08:23<23:48, 3.68s/it] {'loss': 1.421, 'grad_norm': 0.002491595902674158, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:23<23:48, 3.68s/it] 26%|██▌ | 133/520 [08:27<23:45, 3.68s/it] {'loss': 1.331, 'grad_norm': 0.00240057350230429, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:27<23:45, 3.68s/it] 26%|██▌ | 134/520 [08:30<23:45, 3.69s/it] {'loss': 1.4191, 'grad_norm': 0.0027888120106384097, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:30<23:45, 3.69s/it] 26%|██▌ | 135/520 [08:34<23:33, 3.67s/it] {'loss': 1.4873, 'grad_norm': 0.00236886361484122, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:34<23:33, 3.67s/it] 26%|██▌ | 136/520 [08:38<23:32, 3.68s/it] {'loss': 1.4037, 'grad_norm': 0.0023598342203850916, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:38<23:32, 3.68s/it] 26%|██▋ | 137/520 [08:41<23:33, 3.69s/it] {'loss': 1.3289, 'grad_norm': 0.002599541153264408, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<23:33, 3.69s/it] 27%|██▋ | 138/520 [08:45<23:26, 3.68s/it] {'loss': 1.3297, 'grad_norm': 0.002125451500842133, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:45<23:26, 3.68s/it] 27%|██▋ | 139/520 [08:49<23:27, 3.70s/it] {'loss': 1.2761, 'grad_norm': 0.0026351127637431935, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:49<23:27, 3.70s/it] 27%|██▋ | 140/520 [08:53<23:26, 3.70s/it] {'loss': 1.4203, 'grad_norm': 0.0023464574962128904, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:53<23:26, 3.70s/it] 27%|██▋ | 141/520 [08:56<23:19, 3.69s/it] {'loss': 1.4448, 'grad_norm': 0.0022012701633588225, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:56<23:19, 3.69s/it] 27%|██▋ | 142/520 [09:00<23:16, 3.69s/it] {'loss': 1.4631, 'grad_norm': 0.002445737591008339, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:00<23:16, 3.69s/it] 28%|██▊ | 143/520 [09:04<23:06, 3.68s/it] {'loss': 1.3676, 'grad_norm': 0.0027343273980702846, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:04<23:06, 3.68s/it] 28%|██▊ | 144/520 [09:07<23:02, 3.68s/it] {'loss': 1.322, 'grad_norm': 0.0023218564584758174, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:07<23:02, 3.68s/it] 28%|██▊ | 145/520 [09:11<23:00, 
3.68s/it] {'loss': 1.2528, 'grad_norm': 0.002053499228715162, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:11<23:00, 3.68s/it] 28%|██▊ | 146/520 [09:15<22:57, 3.68s/it] {'loss': 1.4946, 'grad_norm': 0.002246176481240338, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:15<22:57, 3.68s/it] 28%|██▊ | 147/520 [09:18<22:51, 3.68s/it] {'loss': 1.3024, 'grad_norm': 0.002348171397051105, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:18<22:51, 3.68s/it] 28%|██▊ | 148/520 [09:22<22:50, 3.68s/it] {'loss': 1.3358, 'grad_norm': 0.0022364410087674716, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:22<22:50, 3.68s/it] 29%|██▊ | 149/520 [09:26<22:46, 3.68s/it] {'loss': 1.282, 'grad_norm': 0.0022834226820338524, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:26<22:46, 3.68s/it] 29%|██▉ | 150/520 [09:29<22:41, 3.68s/it] {'loss': 1.524, 'grad_norm': 0.0025105720988229624, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:29<22:41, 3.68s/it] 29%|██▉ | 151/520 [09:33<22:37, 3.68s/it] {'loss': 1.3241, 'grad_norm': 0.002267511807500746, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:33<22:37, 3.68s/it] 29%|██▉ | 152/520 [09:37<22:33, 3.68s/it] {'loss': 1.2945, 'grad_norm': 0.002344257805151766, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:37<22:33, 3.68s/it] 29%|██▉ | 153/520 [09:40<22:32, 3.69s/it] {'loss': 1.3311, 'grad_norm': 0.0024075046741049663, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:40<22:32, 3.69s/it] 30%|██▉ | 154/520 [09:44<22:26, 3.68s/it] {'loss': 1.4151, 'grad_norm': 0.0021842927689804696, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:44<22:26, 3.68s/it] 30%|██▉ | 155/520 [09:48<22:25, 3.69s/it] {'loss': 1.3275, 'grad_norm': 0.002274466529717364, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:48<22:25, 3.69s/it] 30%|███ | 156/520 [09:51<22:22, 3.69s/it] {'loss': 1.3502, 'grad_norm': 0.0023582848590429028, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:51<22:22, 3.69s/it] 30%|███ | 157/520 [09:55<22:17, 3.68s/it] {'loss': 1.4953, 'grad_norm': 0.0024265705628097665, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:55<22:17, 3.68s/it] 30%|███ | 158/520 [09:59<22:09, 3.67s/it] {'loss': 1.3284, 'grad_norm': 0.002586125369254918, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:59<22:09, 3.67s/it] 31%|███ | 159/520 [10:02<22:02, 3.66s/it] {'loss': 1.3626, 'grad_norm': 0.002148431406687651, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:02<22:02, 3.66s/it] 31%|███ | 160/520 [10:06<21:58, 3.66s/it] {'loss': 1.3888, 'grad_norm': 0.002351893678037357, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:06<21:58, 3.66s/it] 31%|███ | 161/520 [10:10<21:55, 3.67s/it] {'loss': 1.3627, 'grad_norm': 0.0022842407647872794, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:10<21:55, 3.67s/it] 31%|███ | 162/520 [10:13<21:49, 3.66s/it] {'loss': 1.4156, 'grad_norm': 0.0023925745904015694, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:13<21:49, 3.66s/it] 31%|███▏ | 163/520 [10:17<21:44, 3.65s/it] {'loss': 1.2423, 'grad_norm': 0.002924634465984308, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 
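The learning_rate column above is consistent with a standard warmup-plus-cosine schedule: linear warmup from 0.0125 to the 0.2 peak over the first 16 steps, then cosine decay across the remaining 504 of the 520 total steps (e.g. step 44 gives 0.1 * (1 + cos(pi * 28/504)) = 0.198480775..., matching the logged value). A sketch that reproduces the logged values; the function name and parameters are my own, inferred from the log rather than read from the training config:

    import math

    # Sketch: reproduces the learning_rate values printed in this log,
    # assuming linear warmup over 16 steps and cosine decay to zero.
    def lr_at(step, peak_lr=0.2, warmup_steps=16, total_steps=520):
        if step <= warmup_steps:
            return peak_lr * step / warmup_steps  # linear warmup
        progress = (step - warmup_steps) / (total_steps - warmup_steps)
        return 0.5 * peak_lr * (1.0 + math.cos(math.pi * progress))  # cosine decay

    print(lr_at(44))   # ~0.19848077530122082, as logged at step 44
    print(lr_at(100))  # ~0.1866025403784439, as logged at step 100
    print(lr_at(268))  # ~0.1, as logged at step 268 (up to float rounding)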
31%|███▏ | 163/520 [10:17<21:44, 3.65s/it] 32%|███▏ | 164/520 [10:21<21:39, 3.65s/it] {'loss': 1.2141, 'grad_norm': 0.0021238931787360624, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:21<21:39, 3.65s/it] 32%|███▏ | 165/520 [10:24<21:34, 3.65s/it] {'loss': 1.3576, 'grad_norm': 0.0021175052951330413, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:24<21:34, 3.65s/it] 32%|███▏ | 166/520 [10:28<21:28, 3.64s/it] {'loss': 1.3496, 'grad_norm': 0.0024782999778077634, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:28<21:28, 3.64s/it] 32%|███▏ | 167/520 [10:32<21:28, 3.65s/it] {'loss': 1.3384, 'grad_norm': 0.0026313707357380157, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:32<21:28, 3.65s/it] 32%|███▏ | 168/520 [10:35<21:23, 3.64s/it] {'loss': 1.2719, 'grad_norm': 0.002172441593367003, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:35<21:23, 3.64s/it] 32%|███▎ | 169/520 [10:39<21:19, 3.65s/it] {'loss': 1.3487, 'grad_norm': 0.0020869124678244387, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:39<21:19, 3.65s/it] 33%|███▎ | 170/520 [10:43<21:40, 3.72s/it] {'loss': 1.3483, 'grad_norm': 0.0024168220943501365, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:43<21:40, 3.72s/it] 33%|███▎ | 171/520 [10:46<21:32, 3.70s/it] {'loss': 1.2857, 'grad_norm': 0.002481251179078722, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:46<21:32, 3.70s/it] 33%|███▎ | 172/520 [10:50<21:24, 3.69s/it] {'loss': 1.3538, 'grad_norm': 0.0022026138509898606, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:50<21:24, 3.69s/it] 33%|███▎ | 173/520 [10:54<21:19, 3.69s/it] {'loss': 1.2884, 'grad_norm': 0.0021479200698406088, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:54<21:19, 3.69s/it] 33%|███▎ | 174/520 [10:58<21:18, 3.69s/it] {'loss': 1.3645, 'grad_norm': 0.0026990050032901307, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [10:58<21:18, 3.69s/it] 34%|███▎ | 175/520 [11:01<21:14, 3.70s/it] {'loss': 1.2589, 'grad_norm': 0.0021680674888963185, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:01<21:14, 3.70s/it] 34%|███▍ | 176/520 [11:05<21:11, 3.70s/it] {'loss': 1.4351, 'grad_norm': 0.002064002489825112, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:05<21:11, 3.70s/it] 34%|███▍ | 177/520 [11:09<21:24, 3.75s/it] {'loss': 1.3123, 'grad_norm': 0.0024803922575853165, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:09<21:24, 3.75s/it] 34%|███▍ | 178/520 [11:13<21:35, 3.79s/it] {'loss': 1.3332, 'grad_norm': 0.0024296201395635473, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:13<21:35, 3.79s/it] 34%|███▍ | 179/520 [11:17<21:38, 3.81s/it] {'loss': 1.4164, 'grad_norm': 0.002116377734236366, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:17<21:38, 3.81s/it] 35%|███▍ | 180/520 [11:20<21:46, 3.84s/it] {'loss': 1.3289, 'grad_norm': 0.002330157906860202, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:20<21:46, 3.84s/it] 35%|███▍ | 181/520 [11:24<21:41, 3.84s/it] {'loss': 1.2995, 'grad_norm': 0.0020469228459450307, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:24<21:41, 3.84s/it] 35%|███▌ | 182/520 
[11:28<21:29, 3.81s/it] {'loss': 1.3152, 'grad_norm': 0.002288786267375459, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:28<21:29, 3.81s/it] 35%|███▌ | 183/520 [11:32<21:13, 3.78s/it] {'loss': 1.3452, 'grad_norm': 0.0021556190093915736, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:32<21:13, 3.78s/it] 35%|███▌ | 184/520 [11:35<21:01, 3.75s/it] {'loss': 1.2582, 'grad_norm': 0.002188047417803661, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:35<21:01, 3.75s/it] 36%|███▌ | 185/520 [11:39<20:57, 3.75s/it] {'loss': 1.4326, 'grad_norm': 0.002132003552226488, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:39<20:57, 3.75s/it] 36%|███▌ | 186/520 [11:43<20:45, 3.73s/it] {'loss': 1.2828, 'grad_norm': 0.002212421818418324, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:43<20:45, 3.73s/it] 36%|███▌ | 187/520 [11:47<20:37, 3.72s/it] {'loss': 1.2892, 'grad_norm': 0.002639928233192292, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:47<20:37, 3.72s/it] 36%|███▌ | 188/520 [11:50<20:26, 3.70s/it] {'loss': 1.3694, 'grad_norm': 0.002273462756224878, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:50<20:26, 3.70s/it] 36%|███▋ | 189/520 [11:54<20:31, 3.72s/it] {'loss': 1.3804, 'grad_norm': 0.0020345142322534667, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:54<20:31, 3.72s/it] 37%|███▋ | 190/520 [11:58<20:42, 3.77s/it] {'loss': 1.292, 'grad_norm': 0.0022516949272813796, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:58<20:42, 3.77s/it] 37%|███▋ | 191/520 [12:02<20:47, 3.79s/it] {'loss': 1.2489, 'grad_norm': 0.0020189936871900156, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:02<20:47, 3.79s/it] 37%|███▋ | 192/520 [12:06<20:50, 3.81s/it] {'loss': 1.3361, 'grad_norm': 0.0021484656065073717, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:06<20:50, 3.81s/it] 37%|███▋ | 193/520 [12:09<20:46, 3.81s/it] {'loss': 1.3633, 'grad_norm': 0.0024963509498953686, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:09<20:46, 3.81s/it] 37%|███▋ | 194/520 [12:13<20:46, 3.82s/it] {'loss': 1.2406, 'grad_norm': 0.0023948903315746946, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:13<20:46, 3.82s/it] 38%|███▊ | 195/520 [12:17<20:44, 3.83s/it] {'loss': 1.3578, 'grad_norm': 0.002220572758832079, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:17<20:44, 3.83s/it] 38%|███▊ | 196/520 [12:21<20:42, 3.83s/it] {'loss': 1.3247, 'grad_norm': 0.0022583817778406177, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:21<20:42, 3.83s/it] 38%|███▊ | 197/520 [12:25<20:36, 3.83s/it] {'loss': 1.2822, 'grad_norm': 0.002170137054975653, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:25<20:36, 3.83s/it] 38%|███▊ | 198/520 [12:29<20:33, 3.83s/it] {'loss': 1.3537, 'grad_norm': 0.002285892257009869, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:29<20:33, 3.83s/it] 38%|███▊ | 199/520 [12:32<20:14, 3.78s/it] {'loss': 1.2724, 'grad_norm': 0.002316471313754678, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:32<20:14, 3.78s/it] 38%|███▊ | 200/520 [12:36<20:00, 3.75s/it] {'loss': 1.2949, 'grad_norm': 0.0022942320667698104, 
'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:36<20:00, 3.75s/it] 39%|███▊ | 201/520 [12:40<19:49, 3.73s/it] {'loss': 1.3146, 'grad_norm': 0.0019798342507750515, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:40<19:49, 3.73s/it] 39%|███▉ | 202/520 [12:43<19:38, 3.71s/it] {'loss': 1.2663, 'grad_norm': 0.002104662578042803, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:43<19:38, 3.71s/it] 39%|███▉ | 203/520 [12:47<19:29, 3.69s/it] {'loss': 1.3194, 'grad_norm': 0.0022198821123220846, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:47<19:29, 3.69s/it] 39%|███▉ | 204/520 [12:51<19:23, 3.68s/it] {'loss': 1.3469, 'grad_norm': 0.0023151009668356147, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:51<19:23, 3.68s/it] 39%|███▉ | 205/520 [12:54<19:16, 3.67s/it] {'loss': 1.3206, 'grad_norm': 0.002164951296896004, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:54<19:16, 3.67s/it] 40%|███▉ | 206/520 [12:58<19:13, 3.67s/it] {'loss': 1.3853, 'grad_norm': 0.0022511067048402813, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:58<19:13, 3.67s/it] 40%|███▉ | 207/520 [13:02<19:18, 3.70s/it] {'loss': 1.3009, 'grad_norm': 0.001964058003240454, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:02<19:18, 3.70s/it] 40%|████ | 208/520 [13:05<19:13, 3.70s/it] {'loss': 1.3613, 'grad_norm': 0.002488608293586563, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:05<19:13, 3.70s/it] 40%|████ | 209/520 [13:09<19:09, 3.70s/it] {'loss': 1.2704, 'grad_norm': 0.0020792482338735647, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:09<19:09, 3.70s/it] 40%|████ | 210/520 [13:13<19:05, 3.69s/it] {'loss': 1.3612, 'grad_norm': 0.0023473351238711073, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:13<19:05, 3.69s/it] 41%|████ | 211/520 [13:16<19:01, 3.69s/it] {'loss': 1.3579, 'grad_norm': 0.0020114299938190834, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:16<19:01, 3.69s/it] 41%|████ | 212/520 [13:20<19:00, 3.70s/it] {'loss': 1.3335, 'grad_norm': 0.0020880779953284198, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:20<19:00, 3.70s/it] 41%|████ | 213/520 [13:24<18:59, 3.71s/it] {'loss': 1.3043, 'grad_norm': 0.0025770386921422574, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:24<18:59, 3.71s/it] 41%|████ | 214/520 [13:28<18:54, 3.71s/it] {'loss': 1.2878, 'grad_norm': 0.0022575936509499207, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:28<18:54, 3.71s/it] 41%|████▏ | 215/520 [13:31<18:52, 3.71s/it] {'loss': 1.2415, 'grad_norm': 0.002033487556890636, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:31<18:52, 3.71s/it] 42%|████▏ | 216/520 [13:35<18:45, 3.70s/it] {'loss': 1.2025, 'grad_norm': 0.002113832718626807, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:35<18:45, 3.70s/it] 42%|████▏ | 217/520 [13:39<18:39, 3.70s/it] {'loss': 1.3279, 'grad_norm': 0.002157632427180722, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:39<18:39, 3.70s/it] 42%|████▏ | 218/520 [13:42<18:36, 3.70s/it] {'loss': 1.3179, 'grad_norm': 0.002253528674025618, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 
[13:42<18:36, 3.70s/it] 42%|████▏ | 219/520 [13:46<18:29, 3.68s/it] {'loss': 1.3004, 'grad_norm': 0.001946994781348815, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:46<18:29, 3.68s/it] 42%|████▏ | 220/520 [13:50<18:21, 3.67s/it] {'loss': 1.2897, 'grad_norm': 0.0020605711067787674, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:50<18:21, 3.67s/it] 42%|████▎ | 221/520 [13:53<18:17, 3.67s/it] {'loss': 1.3292, 'grad_norm': 0.002176260055407198, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:53<18:17, 3.67s/it] 43%|████▎ | 222/520 [13:57<18:16, 3.68s/it] {'loss': 1.2377, 'grad_norm': 0.002054422432295535, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:57<18:16, 3.68s/it] 43%|████▎ | 223/520 [14:01<18:16, 3.69s/it] {'loss': 1.2324, 'grad_norm': 0.0019941419660947763, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:01<18:16, 3.69s/it] 43%|████▎ | 224/520 [14:04<18:11, 3.69s/it] {'loss': 1.4223, 'grad_norm': 0.0031456425721599754, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:04<18:11, 3.69s/it] 43%|████▎ | 225/520 [14:08<18:07, 3.69s/it] {'loss': 1.2479, 'grad_norm': 0.002099966146696624, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:08<18:07, 3.69s/it] 43%|████▎ | 226/520 [14:12<18:03, 3.68s/it] {'loss': 1.3517, 'grad_norm': 0.002015164881840589, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:12<18:03, 3.68s/it] 44%|████▎ | 227/520 [14:15<17:58, 3.68s/it] {'loss': 1.3411, 'grad_norm': 0.0020048787778378834, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:15<17:58, 3.68s/it] 44%|████▍ | 228/520 [14:19<17:56, 3.69s/it] {'loss': 1.4309, 'grad_norm': 0.002270614243617113, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:19<17:56, 3.69s/it] 44%|████▍ | 229/520 [14:23<17:51, 3.68s/it] {'loss': 1.3102, 'grad_norm': 0.001849000084159085, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:23<17:51, 3.68s/it] 44%|████▍ | 230/520 [14:27<17:47, 3.68s/it] {'loss': 1.1982, 'grad_norm': 0.002148124623281568, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:27<17:47, 3.68s/it] 44%|████▍ | 231/520 [14:30<17:45, 3.69s/it] {'loss': 1.257, 'grad_norm': 0.0018817835568722342, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:30<17:45, 3.69s/it] 45%|████▍ | 232/520 [14:34<17:42, 3.69s/it] {'loss': 1.4517, 'grad_norm': 0.0024225986683562343, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:34<17:42, 3.69s/it] 45%|████▍ | 233/520 [14:38<17:38, 3.69s/it] {'loss': 1.3452, 'grad_norm': 0.0024955234315876223, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:38<17:38, 3.69s/it] 45%|████▌ | 234/520 [14:41<17:37, 3.70s/it] {'loss': 1.2045, 'grad_norm': 0.002106537425309261, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:41<17:37, 3.70s/it] 45%|████▌ | 235/520 [14:45<17:29, 3.68s/it] {'loss': 1.2596, 'grad_norm': 0.002275315420826016, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:45<17:29, 3.68s/it] 45%|████▌ | 236/520 [14:49<17:25, 3.68s/it] {'loss': 1.363, 'grad_norm': 0.0019385341060346745, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:49<17:25, 3.68s/it] 46%|████▌ | 
237/520 [14:52<17:22, 3.68s/it] {'loss': 1.3334, 'grad_norm': 0.002027278590129424, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 46%|████▌ | 238/520 [14:56<17:17, 3.68s/it] {'loss': 1.273, 'grad_norm': 0.002154945794189606, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 46%|████▌ | 239/520 [15:00<17:11, 3.67s/it] {'loss': 1.368, 'grad_norm': 0.0021398534039337486, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 46%|████▌ | 240/520 [15:03<17:05, 3.66s/it] {'loss': 1.144, 'grad_norm': 0.0021445662309518836, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 46%|████▋ | 241/520 [15:07<17:04, 3.67s/it] {'loss': 1.2353, 'grad_norm': 0.0019603198392472306, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 47%|████▋ | 242/520 [15:11<17:01, 3.67s/it] {'loss': 1.2585, 'grad_norm': 0.0018945271378010271, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 47%|████▋ | 243/520 [15:14<16:54, 3.66s/it] {'loss': 1.2407, 'grad_norm': 0.0020516148482692133, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 47%|████▋ | 244/520 [15:18<16:48, 3.65s/it] {'loss': 1.3732, 'grad_norm': 0.002078554975251491, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 47%|████▋ | 245/520 [15:22<16:45, 3.66s/it] {'loss': 1.2308, 'grad_norm': 0.0019524348326969924, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 47%|████▋ | 246/520 [15:25<16:41, 3.65s/it] {'loss': 1.4223, 'grad_norm': 0.002193323167595714, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 48%|████▊ | 247/520 [15:29<16:37, 3.65s/it] {'loss': 1.4169, 'grad_norm': 0.0021456700354930826, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 48%|████▊ | 248/520 [15:32<16:31, 3.64s/it] {'loss': 1.2385, 'grad_norm': 0.002129244360096299, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 48%|████▊ | 249/520 [15:36<16:26, 3.64s/it] {'loss': 1.3371, 'grad_norm': 0.0020903345622006113, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 48%|████▊ | 250/520 [15:40<16:25, 3.65s/it] {'loss': 1.2697, 'grad_norm': 0.0021955700465044075, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 48%|████▊ | 251/520 [15:43<16:20, 3.64s/it] {'loss': 1.3352, 'grad_norm': 0.001880191558848192, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 48%|████▊ | 252/520 [15:47<16:18, 3.65s/it] {'loss': 1.3225, 'grad_norm': 0.0020672958606045002, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 49%|████▊ | 253/520 [15:51<16:16, 3.66s/it] {'loss': 1.3282, 'grad_norm': 0.002286926396165721, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 49%|████▉ | 254/520 [15:54<16:10, 3.65s/it] {'loss': 1.257, 'grad_norm': 0.001904042235266621, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 49%|████▉ | 255/520 [15:58<16:06, 3.65s/it] {'loss': 1.2649, 'grad_norm': 0.0022150767519509428, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 49%|████▉ | 256/520 [16:02<16:01, 3.64s/it] {'loss': 1.3128, 'grad_norm': 0.0021791707968606597, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 49%|████▉ | 257/520 [16:05<15:54, 3.63s/it] {'loss': 1.3013, 'grad_norm': 0.002140277453054153, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 50%|████▉ | 258/520 [16:09<15:59, 3.66s/it] {'loss': 1.3121, 'grad_norm': 0.0018430422867205586, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 50%|████▉ | 259/520 [16:13<15:53, 3.65s/it] {'loss': 1.3772, 'grad_norm': 0.0023360375027179662, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 50%|█████ | 260/520 [16:16<15:49, 3.65s/it] {'loss': 1.3819, 'grad_norm': 0.0020613139025654117, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 50%|█████ | 261/520 [16:20<15:44, 3.65s/it] {'loss': 1.3168, 'grad_norm': 0.00209766356065032, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 50%|█████ | 262/520 [16:24<15:39, 3.64s/it] {'loss': 1.2263, 'grad_norm': 0.002084602128795267, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 51%|█████ | 263/520 [16:27<15:45, 3.68s/it] {'loss': 1.3208, 'grad_norm': 0.0022054684531576884, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 51%|█████ | 264/520 [16:31<15:55, 3.73s/it] {'loss': 1.3416, 'grad_norm': 0.002066023408156831, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 51%|█████ | 265/520 [16:35<15:59, 3.76s/it] {'loss': 1.2433, 'grad_norm': 0.0023527393445409036, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 51%|█████ | 266/520 [16:39<15:50, 3.74s/it] {'loss': 1.1018, 'grad_norm': 0.0018596627007612312, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 51%|█████▏ | 267/520 [16:42<15:41, 3.72s/it] {'loss': 1.2396, 'grad_norm': 0.0019692922546982804, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 52%|█████▏ | 268/520 [16:46<15:34, 3.71s/it] {'loss': 1.4502, 'grad_norm': 0.002835209940967879, 'learning_rate': 0.1, 'epoch': 0.52}
+ 52%|█████▏ | 269/520 [16:50<15:30, 3.71s/it] {'loss': 1.3551, 'grad_norm': 0.0021738232125250757, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 52%|█████▏ | 270/520 [16:54<15:42, 3.77s/it] {'loss': 1.2493, 'grad_norm': 0.0019665900731554984, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 52%|█████▏ | 271/520 [16:58<15:48, 3.81s/it] {'loss': 1.34, 'grad_norm': 0.002148443368298055, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 52%|█████▏ | 272/520 [17:01<15:51, 3.84s/it] {'loss': 1.2761, 'grad_norm': 0.002319134081272651, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 52%|█████▎ | 273/520 [17:05<15:51, 3.85s/it] {'loss': 1.3951, 'grad_norm': 0.002392249573663261, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
+ 53%|█████▎ | 274/520 [17:09<15:52, 3.87s/it] {'loss': 1.3016, 'grad_norm': 0.002222376090572508, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 53%|█████▎ | 275/520 [17:13<15:54, 3.89s/it] {'loss': 1.2439, 'grad_norm': 0.0022078532137789846, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 53%|█████▎ | 276/520 [17:17<15:49, 3.89s/it] {'loss': 1.3127, 'grad_norm': 0.002191274867070237, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 53%|█████▎ | 277/520 [17:21<15:46, 3.90s/it] {'loss': 1.3806, 'grad_norm': 0.002285074207345595, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 53%|█████▎ | 278/520 [17:25<15:42, 3.89s/it] {'loss': 1.1936, 'grad_norm': 0.0019866094223195525, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 54%|█████▎ | 279/520 [17:29<15:38, 3.89s/it] {'loss': 1.2799, 'grad_norm': 0.0023806774006220103, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 54%|█████▍ | 280/520 [17:33<15:36, 3.90s/it] {'loss': 1.2363, 'grad_norm': 0.002222670469555707, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 54%|█████▍ | 281/520 [17:37<15:33, 3.91s/it] {'loss': 1.3527, 'grad_norm': 0.0022431641358854684, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 54%|█████▍ | 282/520 [17:41<15:30, 3.91s/it] {'loss': 1.2034, 'grad_norm': 0.0019690603440022047, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 54%|█████▍ | 283/520 [17:45<15:28, 3.92s/it] {'loss': 1.3714, 'grad_norm': 0.0023054653725408433, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
+ 55%|█████▍ | 284/520 [17:48<15:24, 3.92s/it] {'loss': 1.2591, 'grad_norm': 0.002241660585731297, 'learning_rate': 0.09004321534041836, 'epoch': 0.55}
+ 55%|█████▍ | 285/520 [17:52<15:03, 3.84s/it] {'loss': 1.2359, 'grad_norm': 0.002175300671028239, 'learning_rate': 0.08942317838840624, 'epoch': 0.55}
+ 55%|█████▌ | 286/520 [17:56<14:46, 3.79s/it] {'loss': 1.1006, 'grad_norm': 0.0022583112498914164, 'learning_rate': 0.08880355238966922, 'epoch': 0.55}
+ 55%|█████▌ | 287/520 [17:59<14:34, 3.75s/it] {'loss': 1.3459, 'grad_norm': 0.0021635451943817254, 'learning_rate': 0.08818436141924073, 'epoch': 0.55}
+ 55%|█████▌ | 288/520 [18:03<14:24, 3.73s/it] {'loss': 1.3855, 'grad_norm': 0.0023210821788194383, 'learning_rate': 0.08756562953525152, 'epoch': 0.55}
+ 56%|█████▌ | 289/520 [18:07<14:17, 3.71s/it] {'loss': 1.2493, 'grad_norm': 0.0019886268032378715, 'learning_rate': 0.08694738077799487, 'epoch': 0.56}
+ 56%|█████▌ | 290/520 [18:10<14:10, 3.70s/it] {'loss': 1.1689, 'grad_norm': 0.0019304665957131707, 'learning_rate': 0.08632963916899268, 'epoch': 0.56}
+ 56%|█████▌ | 291/520 [18:14<14:05, 3.69s/it] {'loss': 1.227, 'grad_norm': 0.002207657903542193, 'learning_rate': 0.08571242871006202, 'epoch': 0.56}
+ 56%|█████▌ | 292/520 [18:18<14:00, 3.69s/it] {'loss': 1.2757, 'grad_norm': 0.0020330895028484344, 'learning_rate': 0.08509577338238256, 'epoch': 0.56}
+ 56%|█████▋ | 293/520 [18:21<13:55, 3.68s/it] {'loss': 1.2154, 'grad_norm': 0.0022287314706798205, 'learning_rate': 0.08447969714556484, 'epoch': 0.56}
+ 57%|█████▋ | 294/520 [18:25<13:53, 3.69s/it] {'loss': 1.2446, 'grad_norm': 0.0022113929536564814, 'learning_rate': 0.08386422393671933, 'epoch': 0.57}
+ 57%|█████▋ | 295/520 [18:29<13:49, 3.69s/it] {'loss': 1.3181, 'grad_norm': 0.0022776001073931836, 'learning_rate': 0.08324937766952638, 'epoch': 0.57}
+ 57%|█████▋ | 296/520 [18:33<13:47, 3.69s/it] {'loss': 1.1887, 'grad_norm': 0.002258710243692238, 'learning_rate': 0.08263518223330697, 'epoch': 0.57}
+ 57%|█████▋ | 297/520 [18:36<13:41, 3.68s/it] {'loss': 1.3192, 'grad_norm': 0.0022744219651314773, 'learning_rate': 0.08202166149209474, 'epoch': 0.57}
+ 57%|█████▋ | 298/520 [18:40<13:36, 3.68s/it] {'loss': 1.28, 'grad_norm': 0.0018635444297394155, 'learning_rate': 0.08140883928370855, 'epoch': 0.57}
+ 57%|█████▊ | 299/520 [18:44<13:32, 3.68s/it] {'loss': 1.3377, 'grad_norm': 0.0018832842470617181, 'learning_rate': 0.0807967394188264, 'epoch': 0.57}
+ 58%|█████▊ | 300/520 [18:47<13:27, 3.67s/it] {'loss': 1.3397, 'grad_norm': 0.0020848575552029026, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 58%|█████▊ | 301/520 [18:51<13:25, 3.68s/it] {'loss': 1.3075, 'grad_norm': 0.002092910011448672, 'learning_rate': 0.07957480182103199, 'epoch': 0.58}
+ 58%|█████▊ | 302/520 [18:55<13:21, 3.68s/it] {'loss': 1.3597, 'grad_norm': 0.0021631202678311858, 'learning_rate': 0.07896501156545044, 'epoch': 0.58}
+ 58%|█████▊ | 303/520 [18:58<13:21, 3.69s/it] {'loss': 1.2421, 'grad_norm': 0.0023962622204508647, 'learning_rate': 0.07835603860618973, 'epoch': 0.58}
+ 58%|█████▊ | 304/520 [19:02<13:23, 3.72s/it] {'loss': 1.2442, 'grad_norm': 0.0022521565490914303, 'learning_rate': 0.07774790660436857, 'epoch': 0.58}
+ 59%|█████▊ | 305/520 [19:06<13:14, 3.69s/it] {'loss': 1.3494, 'grad_norm': 0.0023155314507931984, 'learning_rate': 0.07714063918843106, 'epoch': 0.59}
+ 59%|█████▉ | 306/520 [19:09<13:10, 3.69s/it] {'loss': 1.2895, 'grad_norm': 0.0021150220684157464, 'learning_rate': 0.0765342599532285, 'epoch': 0.59}
+ 59%|█████▉ | 307/520 [19:13<13:27, 3.79s/it] {'loss': 1.2239, 'grad_norm': 0.0019265895875906478, 'learning_rate': 0.07592879245910272, 'epoch': 0.59}
+ 59%|█████▉ | 308/520 [19:17<13:17, 3.76s/it] {'loss': 1.3454, 'grad_norm': 0.002102528724571073, 'learning_rate': 0.07532426023097064, 'epoch': 0.59}
+ 59%|█████▉ | 309/520 [19:21<13:11, 3.75s/it] {'loss': 1.2256, 'grad_norm': 0.0019365307686279821, 'learning_rate': 0.07472068675741024, 'epoch': 0.59}
+ 60%|█████▉ | 310/520 [19:25<13:03, 3.73s/it] {'loss': 1.2027, 'grad_norm': 0.0020141470110962892, 'learning_rate': 0.07411809548974792, 'epoch': 0.6}
+ 60%|█████▉ | 311/520 [19:28<12:57, 3.72s/it] {'loss': 1.1775, 'grad_norm': 0.0019406749398112103, 'learning_rate': 0.07351650984114727, 'epoch': 0.6}
+ 60%|██████ | 312/520 [19:32<12:50, 3.70s/it] {'loss': 1.1679, 'grad_norm': 0.0022396118430422747, 'learning_rate': 0.0729159531856995, 'epoch': 0.6}
+ 60%|██████ | 313/520 [19:36<12:45, 3.70s/it] {'loss': 1.1563, 'grad_norm': 0.0018626155694935176, 'learning_rate': 0.07231644885751508, 'epoch': 0.6}
+ 60%|██████ | 314/520 [19:40<13:05, 3.81s/it] {'loss': 1.1982, 'grad_norm': 0.0018715375012895575, 'learning_rate': 0.07171802014981725, 'epoch': 0.6}
+ 61%|██████ | 315/520 [19:43<12:55, 3.79s/it] {'loss': 1.309, 'grad_norm': 0.002606300667269472, 'learning_rate': 0.07112069031403703, 'epoch': 0.61}
+ 61%|██████ | 316/520 [19:48<13:13, 3.89s/it] {'loss': 1.1708, 'grad_norm': 0.0024479527268523767, 'learning_rate': 0.07052448255890957, 'epoch': 0.61}
+ 61%|██████ | 317/520 [19:51<12:56, 3.83s/it] {'loss': 1.1901, 'grad_norm': 0.001841398868156548, 'learning_rate': 0.0699294200495727, 'epoch': 0.61}
+ 61%|██████ | 318/520 [19:55<12:43, 3.78s/it] {'loss': 1.3165, 'grad_norm': 0.002339278070182083, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 61%|██████▏ | 319/520 [19:59<12:53, 3.85s/it] {'loss': 1.176, 'grad_norm': 0.001973212784067095, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 62%|██████▏ | 320/520 [20:03<12:37, 3.79s/it] {'loss': 1.1175, 'grad_norm': 0.0020971747134403917, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 62%|██████▏ | 321/520 [20:06<12:27, 3.75s/it] {'loss': 1.3205, 'grad_norm': 0.0021858797555290156, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 62%|██████▏ | 322/520 [20:10<12:20, 3.74s/it] {'loss': 1.1797, 'grad_norm': 0.002004084489608074, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 62%|██████▏ | 323/520 [20:14<12:14, 3.73s/it] {'loss': 1.2569, 'grad_norm': 0.0023521180412957154, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 62%|██████▏ | 324/520 [20:17<12:06, 3.71s/it] {'loss': 1.2556, 'grad_norm': 0.002344882976773188, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 62%|██████▎ | 325/520 [20:21<12:01, 3.70s/it] {'loss': 1.2682, 'grad_norm': 0.0021572846848781284, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 63%|██████▎ | 326/520 [20:25<11:56, 3.69s/it] {'loss': 1.2503, 'grad_norm': 0.002061517347903394, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 63%|██████▎ | 327/520 [20:28<11:55, 3.71s/it] {'loss': 1.3239, 'grad_norm': 0.0024313383432789325, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 63%|██████▎ | 328/520 [20:32<11:52, 3.71s/it] {'loss': 1.3107, 'grad_norm': 0.0021404542136785783, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 63%|██████▎ | 329/520 [20:36<11:45, 3.70s/it] {'loss': 1.1719, 'grad_norm': 0.0017872564011263295, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 63%|██████▎ | 330/520 [20:39<11:44, 3.71s/it] {'loss': 1.2463, 'grad_norm': 0.001885425762412761, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 64%|██████▎ | 331/520 [20:43<11:43, 3.72s/it] {'loss': 1.2069, 'grad_norm': 0.0019807498659068527, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 64%|██████▍ | 332/520 [20:47<11:39, 3.72s/it] {'loss': 1.3328, 'grad_norm': 0.0021420381732391047, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 64%|██████▍ | 333/520 [20:51<11:32, 3.70s/it] {'loss': 1.3646, 'grad_norm': 0.002120200699267516, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 64%|██████▍ | 334/520 [20:54<11:27, 3.69s/it] {'loss': 1.2537, 'grad_norm': 0.0023441127377765466, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 64%|██████▍ | 335/520 [20:58<11:27, 3.71s/it] {'loss': 1.2483, 'grad_norm': 0.0018537893096395716, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 65%|██████▍ | 336/520 [21:02<11:29, 3.75s/it] {'loss': 1.1455, 'grad_norm': 0.002252256556844344, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 65%|██████▍ | 337/520 [21:06<11:28, 3.76s/it] {'loss': 1.1382, 'grad_norm': 0.0020872567474755094, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 65%|██████▌ | 338/520 [21:09<11:27, 3.78s/it] {'loss': 1.2617, 'grad_norm': 0.0020656007966045106, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 65%|██████▌ | 339/520 [21:13<11:18, 3.75s/it] {'loss': 1.2041, 'grad_norm': 0.0020164953122542603, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 65%|██████▌ | 340/520 [21:17<11:12, 3.74s/it] {'loss': 1.1918, 'grad_norm': 0.0020303465020686325, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 66%|██████▌ | 341/520 [21:21<11:05, 3.72s/it] {'loss': 1.217, 'grad_norm': 0.0021253454144835936, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 66%|██████▌ | 342/520 [21:24<10:59, 3.71s/it] {'loss': 1.2938, 'grad_norm': 0.002319745620393236, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 66%|██████▌ | 343/520 [21:28<10:56, 3.71s/it] {'loss': 1.2592, 'grad_norm': 0.002074685367487991, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 66%|██████▌ | 344/520 [21:32<10:51, 3.70s/it] {'loss': 1.1637, 'grad_norm': 0.0020970565752965, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
+ 66%|██████▋ | 345/520 [21:35<10:45, 3.69s/it] {'loss': 1.2841, 'grad_norm': 0.002276175592122003, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 67%|██████▋ | 346/520 [21:39<10:43, 3.70s/it] {'loss': 1.2603, 'grad_norm': 0.0019370153565015163, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 67%|██████▋ | 347/520 [21:43<10:38, 3.69s/it] {'loss': 1.1858, 'grad_norm': 0.0019114631307657315, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
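Editor's note: the "Token indices sequence length" line above is the standard Hugging Face tokenizer warning, not a crash. It fires when a raw sample tokenizes to more ids (here 2778) than the tokenizer's `model_max_length` (2048, from `--model_max_length 2048`); it is harmless as long as the data pipeline truncates before the forward pass. A minimal, self-contained reproduction sketch (illustrative only, not the repo's preprocessing code; `long_text` is a made-up sample):

```python
from transformers import AutoTokenizer

# Same base tokenizer and max length as this run (downloads from the Hub).
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048)
long_text = "hello " * 3000  # hypothetical over-long sample

ids = tok(long_text)["input_ids"]  # no truncation -> the warning above fires
print(len(ids))                    # > 2048

ids = tok(long_text, truncation=True)["input_ids"]  # clipped to model_max_length
print(len(ids))                    # 2048, safe to feed to the model
```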
+ 67%|██████▋ | 348/520 [21:46<10:34, 3.69s/it] {'loss': 1.1476, 'grad_norm': 0.002451787166851132, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 67%|██████▋ | 349/520 [21:50<10:31, 3.70s/it] {'loss': 1.1861, 'grad_norm': 0.00212809014883698, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 67%|██████▋ | 350/520 [21:54<10:27, 3.69s/it] {'loss': 1.2313, 'grad_norm': 0.0021448561980133883, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 68%|██████▊ | 351/520 [21:57<10:23, 3.69s/it] {'loss': 1.1347, 'grad_norm': 0.001918698348038822, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 68%|██████▊ | 352/520 [22:01<10:22, 3.70s/it] {'loss': 1.258, 'grad_norm': 0.0019367838858406561, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 68%|██████▊ | 353/520 [22:05<10:18, 3.70s/it] {'loss': 1.2066, 'grad_norm': 0.0016883571405327984, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 68%|██████▊ | 354/520 [22:09<10:12, 3.69s/it] {'loss': 1.3407, 'grad_norm': 0.0019814914440747034, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 68%|██████▊ | 355/520 [22:12<10:07, 3.68s/it] {'loss': 1.195, 'grad_norm': 0.002052862067644898, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 68%|██████▊ | 356/520 [22:16<10:03, 3.68s/it] {'loss': 1.1956, 'grad_norm': 0.002129545645502168, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 69%|██████▊ | 357/520 [22:20<09:59, 3.68s/it] {'loss': 1.2175, 'grad_norm': 0.0019117747031980558, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 69%|██████▉ | 358/520 [22:23<09:56, 3.68s/it] {'loss': 1.1513, 'grad_norm': 0.0019714551547816007, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 69%|██████▉ | 359/520 [22:27<09:52, 3.68s/it] {'loss': 1.2735, 'grad_norm': 0.0021866997081608688, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 69%|██████▉ | 360/520 [22:31<09:49, 3.69s/it] {'loss': 1.3028, 'grad_norm': 0.0021907532667102486, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 69%|██████▉ | 361/520 [22:34<09:45, 3.68s/it] {'loss': 1.2802, 'grad_norm': 0.0019843294025331786, 'learning_rate': 0.04522281607821288, 'epoch': 0.69}
+ 70%|██████▉ | 362/520 [22:38<09:40, 3.68s/it] {'loss': 1.216, 'grad_norm': 0.002158520827624312, 'learning_rate': 0.04470238686320606, 'epoch': 0.7}
+ 70%|██████▉ | 363/520 [22:42<09:37, 3.68s/it] {'loss': 1.2357, 'grad_norm': 0.002029416950354889, 'learning_rate': 0.044184106189231624, 'epoch': 0.7}
+ 70%|███████ | 364/520 [22:45<09:35, 3.69s/it] {'loss': 1.3039, 'grad_norm': 0.0020089751760062158, 'learning_rate': 0.043667994193637795, 'epoch': 0.7}
+ 70%|███████ | 365/520 [22:49<09:30, 3.68s/it] {'loss': 1.2969, 'grad_norm': 0.002124626753312334, 'learning_rate': 0.043154070929510784, 'epoch': 0.7}
+ 70%|███████ | 366/520 [22:53<09:26, 3.68s/it] {'loss': 1.248, 'grad_norm': 0.0019917156639633494, 'learning_rate': 0.04264235636489542, 'epoch': 0.7}
+ 71%|███████ | 367/520 [22:56<09:22, 3.68s/it] {'loss': 1.2479, 'grad_norm': 0.001975796516108528, 'learning_rate': 0.04213287038201943, 'epoch': 0.71}
+ 71%|███████ | 368/520 [23:00<09:22, 3.70s/it] {'loss': 1.1017, 'grad_norm': 0.002052787454512458, 'learning_rate': 0.04162563277652104, 'epoch': 0.71}
+ 71%|███████ | 369/520 [23:04<09:18, 3.70s/it] {'loss': 1.2601, 'grad_norm': 0.001817872076207225, 'learning_rate': 0.04112066325667954, 'epoch': 0.71}
+ 71%|███████ | 370/520 [23:08<09:16, 3.71s/it] {'loss': 1.1558, 'grad_norm': 0.0018883883344182016, 'learning_rate': 0.04061798144264986, 'epoch': 0.71}
+ 71%|███████▏ | 371/520 [23:11<09:13, 3.72s/it] {'loss': 1.1603, 'grad_norm': 0.002097539623057769, 'learning_rate': 0.04011760686569998, 'epoch': 0.71}
+ 72%|███████▏ | 372/520 [23:15<09:08, 3.71s/it] {'loss': 1.3436, 'grad_norm': 0.0018050061128641384, 'learning_rate': 0.03961955896745224, 'epoch': 0.72}
+ 72%|███████▏ | 373/520 [23:19<09:06, 3.72s/it] {'loss': 1.223, 'grad_norm': 0.002120345671583554, 'learning_rate': 0.03912385709912794, 'epoch': 0.72}
+ 72%|███████▏ | 374/520 [23:22<09:01, 3.71s/it] {'loss': 1.2451, 'grad_norm': 0.0020061089790237296, 'learning_rate': 0.038630520520795276, 'epoch': 0.72}
+ 72%|███████▏ | 375/520 [23:26<08:57, 3.70s/it] {'loss': 1.1581, 'grad_norm': 0.002075673920360943, 'learning_rate': 0.03813956840062119, 'epoch': 0.72}
+ 72%|███████▏ | 376/520 [23:30<08:52, 3.70s/it] {'loss': 1.2782, 'grad_norm': 0.0019281939932221013, 'learning_rate': 0.037651019814126656, 'epoch': 0.72}
+ 72%|███████▎ | 377/520 [23:33<08:48, 3.70s/it] {'loss': 1.2095, 'grad_norm': 0.002043576018430237, 'learning_rate': 0.037164893743445275, 'epoch': 0.72}
+ 73%|███████▎ | 378/520 [23:37<08:42, 3.68s/it] {'loss': 1.2655, 'grad_norm': 0.0019380104687636843, 'learning_rate': 0.03668120907658603, 'epoch': 0.73}
+ 73%|███████▎ | 379/520 [23:41<08:38, 3.68s/it] {'loss': 1.2414, 'grad_norm': 0.0018773048186731566, 'learning_rate': 0.036199984606699154, 'epoch': 0.73}
+ 73%|███████▎ | 380/520 [23:44<08:34, 3.67s/it] {'loss': 1.3149, 'grad_norm': 0.002185173132133006, 'learning_rate': 0.035721239031346066, 'epoch': 0.73}
+ 73%|███████▎ | 381/520 [23:48<08:30, 3.67s/it] {'loss': 1.2425, 'grad_norm': 0.0019551259477824066, 'learning_rate': 0.03524499095177297, 'epoch': 0.73}
+ 73%|███████▎ | 382/520 [23:52<08:27, 3.68s/it] {'loss': 1.2671, 'grad_norm': 0.0020786367444439683, 'learning_rate': 0.03477125887218792, 'epoch': 0.73}
+ 74%|███████▎ | 383/520 [23:55<08:23, 3.68s/it] {'loss': 1.0813, 'grad_norm': 0.002076790020784943, 'learning_rate': 0.03430006119904196, 'epoch': 0.74}
+ 74%|███████▍ | 384/520 [23:59<08:19, 3.67s/it] {'loss': 1.3454, 'grad_norm': 0.0021425707064937983, 'learning_rate': 0.033831416240314084, 'epoch': 0.74}
+ 74%|███████▍ | 385/520 [24:03<08:15, 3.67s/it] {'loss': 1.2208, 'grad_norm': 0.001830142583565089, 'learning_rate': 0.03336534220479961, 'epoch': 0.74}
+ 74%|███████▍ | 386/520 [24:06<08:10, 3.66s/it] {'loss': 1.1724, 'grad_norm': 0.0017001229575200195, 'learning_rate': 0.032901857201403005, 'epoch': 0.74}
+ 74%|███████▍ | 387/520 [24:10<08:08, 3.67s/it] {'loss': 1.3401, 'grad_norm': 0.0019505173241741549, 'learning_rate': 0.032440979238433976, 'epoch': 0.74}
+ 75%|███████▍ | 388/520 [24:14<08:05, 3.68s/it] {'loss': 1.1231, 'grad_norm': 0.0018654854227557044, 'learning_rate': 0.03198272622290804, 'epoch': 0.75}
+ 75%|███████▍ | 389/520 [24:17<08:00, 3.67s/it] {'loss': 1.1768, 'grad_norm': 0.002272217350226325, 'learning_rate': 0.03152711595985065, 'epoch': 0.75}
+ 75%|███████▌ | 390/520 [24:21<07:57, 3.67s/it] {'loss': 1.2383, 'grad_norm': 0.0018868414317828527, 'learning_rate': 0.031074166151605298, 'epoch': 0.75}
+ 75%|███████▌ | 391/520 [24:25<07:54, 3.68s/it] {'loss': 1.3179, 'grad_norm': 0.0020930218072982535, 'learning_rate': 0.030623894397145836, 'epoch': 0.75}
+ 75%|███████▌ | 392/520 [24:29<07:50, 3.68s/it] {'loss': 1.1324, 'grad_norm': 0.0019210867421095364, 'learning_rate': 0.03017631819139273, 'epoch': 0.75}
+ 76%|███████▌ | 393/520 [24:32<07:45, 3.67s/it] {'loss': 1.1636, 'grad_norm': 0.001814603521751698, 'learning_rate': 0.029731454924533086, 'epoch': 0.76}
+ 76%|███████▌ | 394/520 [24:36<07:42, 3.67s/it] {'loss': 1.1956, 'grad_norm': 0.002101015587122267, 'learning_rate': 0.029289321881345254, 'epoch': 0.76}
+ 76%|███████▌ | 395/520 [24:39<07:38, 3.67s/it] {'loss': 1.157, 'grad_norm': 0.002131419179906133, 'learning_rate': 0.028849936240527008, 'epoch': 0.76}
+ 76%|███████▌ | 396/520 [24:43<07:35, 3.67s/it] {'loss': 1.2405, 'grad_norm': 0.0020250525771682735, 'learning_rate': 0.028413315074028157, 'epoch': 0.76}
+ 76%|███████▋ | 397/520 [24:47<07:31, 3.67s/it] {'loss': 1.2216, 'grad_norm': 0.0018660603793924143, 'learning_rate': 0.027979475346387363, 'epoch': 0.76}
+ 77%|███████▋ | 398/520 [24:51<07:30, 3.69s/it] {'loss': 1.2233, 'grad_norm': 0.0020436498110635807, 'learning_rate': 0.027548433914072735, 'epoch': 0.77}
+ 77%|███████▋ | 399/520 [24:54<07:30, 3.73s/it] {'loss': 1.209, 'grad_norm': 0.0019348735426335127, 'learning_rate': 0.027120207524827168, 'epoch': 0.77}
+ 77%|███████▋ | 400/520 [24:58<07:25, 3.71s/it] {'loss': 1.2492, 'grad_norm': 0.001961150254542122, 'learning_rate': 0.02669481281701739, 'epoch': 0.77}
+ 77%|███████▋ | 401/520 [25:02<07:19, 3.70s/it] {'loss': 1.0489, 'grad_norm': 0.002086170336070891, 'learning_rate': 0.026272266318987603, 'epoch': 0.77}
+ 77%|███████▋ | 402/520 [25:05<07:14, 3.68s/it] {'loss': 1.1701, 'grad_norm': 0.0021594038546643855, 'learning_rate': 0.02585258444841733, 'epoch': 0.77}
+ 78%|███████▊ | 403/520 [25:09<07:10, 3.68s/it] {'loss': 1.196, 'grad_norm': 0.0022438022616267524, 'learning_rate': 0.025435783511683442, 'epoch': 0.78}
+ 78%|███████▊ | 404/520 [25:13<07:05, 3.66s/it] {'loss': 1.1113, 'grad_norm': 0.0023810289588368336, 'learning_rate': 0.02502187970322657, 'epoch': 0.78}
+ 78%|███████▊ | 405/520 [25:16<07:01, 3.66s/it] {'loss': 1.2103, 'grad_norm': 0.0019090935705848265, 'learning_rate': 0.02461088910492202, 'epoch': 0.78}
+ 78%|███████▊ | 406/520 [25:20<06:57, 3.66s/it] {'loss': 1.1451, 'grad_norm': 0.002382834580567223, 'learning_rate': 0.02420282768545469, 'epoch': 0.78}
+ 78%|███████▊ | 407/520 [25:24<06:53, 3.66s/it] {'loss': 1.2893, 'grad_norm': 0.0020144434277979744, 'learning_rate': 0.02379771129969892, 'epoch': 0.78}
+ 78%|███████▊ | 408/520 [25:27<06:49, 3.65s/it] {'loss': 1.1873, 'grad_norm': 0.0021247461427615385, 'learning_rate': 0.023395555688102213, 'epoch': 0.78}
+ 79%|███████▊ | 409/520 [25:31<06:55, 3.74s/it] {'loss': 1.3118, 'grad_norm': 0.002177143816402767, 'learning_rate': 0.02299637647607372, 'epoch': 0.79}
+ 79%|███████▉ | 410/520 [25:35<06:56, 3.78s/it] {'loss': 1.0369, 'grad_norm': 0.001949990884762016, 'learning_rate': 0.022600189173377264, 'epoch': 0.79}
+ 79%|███████▉ | 411/520 [25:39<06:56, 3.82s/it] {'loss': 1.2884, 'grad_norm': 0.0023836247476217275, 'learning_rate': 0.022207009173528525, 'epoch': 0.79}
+ 79%|███████▉ | 412/520 [25:43<06:54, 3.84s/it] {'loss': 1.2034, 'grad_norm': 0.002099522544456501, 'learning_rate': 0.02181685175319702, 'epoch': 0.79}
+ 79%|███████▉ | 413/520 [25:47<06:52, 3.85s/it] {'loss': 1.2367, 'grad_norm': 0.0019298916181654476, 'learning_rate': 0.021429732071612653, 'epoch': 0.79}
+ 80%|███████▉ | 414/520 [25:51<06:49, 3.86s/it] {'loss': 1.0367, 'grad_norm': 0.0016756477382549285, 'learning_rate': 0.02104566516997647, 'epoch': 0.8}
+ 80%|███████▉ | 415/520 [25:55<06:46, 3.87s/it] {'loss': 1.1721, 'grad_norm': 0.0019027945819670108, 'learning_rate': 0.020664665970876496, 'epoch': 0.8}
+ 80%|████████ | 416/520 [25:58<06:42, 3.87s/it] {'loss': 1.0908, 'grad_norm': 0.002162445675431115, 'learning_rate': 0.020286749277707784, 'epoch': 0.8}
+ 80%|████████ | 417/520 [26:02<06:38, 3.87s/it] {'loss': 1.2623, 'grad_norm': 0.0021678364445153506, 'learning_rate': 0.019911929774097215, 'epoch': 0.8}
+ 80%|████████ | 418/520 [26:06<06:35, 3.87s/it] {'loss': 1.2398, 'grad_norm': 0.001834147392771681, 'learning_rate': 0.019540222023333165, 'epoch': 0.8}
+ 81%|████████ | 419/520 [26:10<06:31, 3.88s/it] {'loss': 1.2289, 'grad_norm': 0.002121090010846648, 'learning_rate': 0.01917164046779948, 'epoch': 0.81}
+ 81%|████████ | 420/520 [26:14<06:28, 3.88s/it] {'loss': 1.1197, 'grad_norm': 0.002102473826515942, 'learning_rate': 0.018806199428414352, 'epoch': 0.81}
+ 81%|████████ | 421/520 [26:18<06:24, 3.89s/it] {'loss': 1.0563, 'grad_norm': 0.0021941907580243923, 'learning_rate': 0.018443913104073985, 'epoch': 0.81}
+ 81%|████████ | 422/520 [26:22<06:21, 3.89s/it] {'loss': 1.1778, 'grad_norm': 0.002006866108553191, 'learning_rate': 0.01808479557110081, 'epoch': 0.81}
+ 81%|████████▏ | 423/520 [26:26<06:17, 3.89s/it] {'loss': 1.1596, 'grad_norm': 0.0022244273957388876, 'learning_rate': 0.017728860782696667, 'epoch': 0.81}
+ 82%|████████▏ | 424/520 [26:30<06:14, 3.90s/it] {'loss': 1.3194, 'grad_norm': 0.002058511318785514, 'learning_rate': 0.017376122568400532, 'epoch': 0.82}
+ 82%|████████▏ | 425/520 [26:33<06:09, 3.89s/it] {'loss': 1.1708, 'grad_norm': 0.0019487083475006356, 'learning_rate': 0.017026594633551252, 'epoch': 0.82}
+ 82%|████████▏ | 426/520 [26:37<06:05, 3.89s/it] {'loss': 1.1909, 'grad_norm': 0.0026243385402740325, 'learning_rate': 0.01668029055875512, 'epoch': 0.82}
+ 82%|████████▏ | 427/520 [26:41<06:02, 3.89s/it] {'loss': 1.1055, 'grad_norm': 0.0019469701747011872, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 82%|████████▏ | 428/520 [26:45<05:55, 3.87s/it] {'loss': 1.0825, 'grad_norm': 0.0020109399718086654, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 82%|████████▎ | 429/520 [26:49<05:47, 3.82s/it] {'loss': 1.181, 'grad_norm': 0.0019049190462804104, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 83%|████████▎ | 430/520 [26:52<05:39, 3.77s/it] {'loss': 1.1781, 'grad_norm': 0.0018046743266082947, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 83%|████████▎ | 431/520 [26:56<05:33, 3.75s/it] {'loss': 1.2067, 'grad_norm': 0.002138834339380418, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 83%|████████▎ | 432/520 [27:00<05:28, 3.73s/it] {'loss': 1.0969, 'grad_norm': 0.0020982153211891058, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 83%|████████▎ | 433/520 [27:03<05:22, 3.71s/it] {'loss': 1.2268, 'grad_norm': 0.0020019145274563998, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 83%|████████▎ | 434/520 [27:07<05:18, 3.70s/it] {'loss': 0.9644, 'grad_norm': 0.0018883465945947141, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 84%|████████▎ | 435/520 [27:11<05:14, 3.71s/it] {'loss': 1.259, 'grad_norm': 0.0023560153702803987, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 84%|████████▍ | 436/520 [27:15<05:10, 3.70s/it] {'loss': 1.0511, 'grad_norm': 0.002004272324770036, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 84%|████████▍ | 437/520 [27:18<05:05, 3.69s/it] {'loss': 1.2855, 'grad_norm': 0.0019820415032661687, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 84%|████████▍ | 438/520 [27:22<05:02, 3.69s/it] {'loss': 1.0951, 'grad_norm': 0.0019412552476566138, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 84%|████████▍ | 439/520 [27:26<04:58, 3.68s/it] {'loss': 1.1745, 'grad_norm': 0.0018341497599758342, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 85%|████████▍ | 440/520 [27:29<04:55, 3.69s/it] {'loss': 1.1364, 'grad_norm': 0.001923980125295913, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 85%|████████▍ | 441/520 [27:33<04:51, 3.69s/it] {'loss': 1.198, 'grad_norm': 0.001885701928664696, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 85%|████████▌ | 442/520 [27:37<04:47, 3.68s/it] {'loss': 1.194, 'grad_norm': 0.0022235768924821894, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 85%|████████▌ | 443/520 [27:40<04:43, 3.68s/it] {'loss': 1.2125, 'grad_norm': 0.0020396802461444775, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 85%|████████▌ | 444/520 [27:44<04:39, 3.68s/it] {'loss': 1.1798, 'grad_norm': 0.0017940063717589124, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 86%|████████▌ | 445/520 [27:48<04:35, 3.67s/it] {'loss': 1.0974, 'grad_norm': 0.0018799014302961503, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 86%|████████▌ | 446/520 [27:51<04:32, 3.68s/it] {'loss': 1.2777, 'grad_norm': 0.0018783106208224174, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 86%|████████▌ | 447/520 [27:55<04:29, 3.69s/it] {'loss': 1.1944, 'grad_norm': 0.0019929001268095235, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 86%|████████▌ | 448/520 [27:59<04:25, 3.69s/it] {'loss': 1.1683, 'grad_norm': 0.0020389451117427816, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 86%|████████▋ | 449/520 [28:02<04:21, 3.69s/it] {'loss': 1.2275, 'grad_norm': 0.001983517061415689, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 87%|████████▋ | 450/520 [28:06<04:18, 3.70s/it] {'loss': 1.2062, 'grad_norm': 0.001959595632895285, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 87%|████████▋ | 451/520 [28:10<04:15, 3.70s/it] {'loss': 1.2022, 'grad_norm': 0.0019529999645084837, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 87%|████████▋ | 452/520 [28:14<04:13, 3.72s/it] {'loss': 1.2695, 'grad_norm': 0.0018683043805329937, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 87%|████████▋ | 453/520 [28:17<04:08, 3.71s/it] {'loss': 1.2549, 'grad_norm': 0.0019610034570224813, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 87%|████████▋ | 454/520 [28:21<04:04, 3.70s/it] {'loss': 1.1148, 'grad_norm': 0.0021826300638841337, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 88%|████████▊ | 455/520 [28:25<03:59, 3.69s/it] {'loss': 1.2505, 'grad_norm': 0.0019291688137396796, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 88%|████████▊ | 456/520 [28:28<03:55, 3.68s/it] {'loss': 1.1686, 'grad_norm': 0.001989421076970733, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 88%|████████▊ | 457/520 [28:32<03:51, 3.68s/it] {'loss': 1.1809, 'grad_norm': 0.0018273363207793368, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 88%|████████▊ | 458/520 [28:36<03:48, 3.68s/it] {'loss': 1.3082, 'grad_norm': 0.0021044470879419573, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 88%|████████▊ | 459/520 [28:39<03:44, 3.68s/it] {'loss': 1.2481, 'grad_norm': 0.0020410310581271233, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 88%|████████▊ | 460/520 [28:43<03:40, 3.67s/it] {'loss': 1.1196, 'grad_norm': 0.001973732564965987, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 89%|████████▊ | 461/520 [28:47<03:37, 3.69s/it] {'loss': 1.2691, 'grad_norm': 0.00164427020978691, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 89%|████████▉ | 462/520 [28:50<03:34, 3.70s/it] {'loss': 1.3171, 'grad_norm': 0.0019079498459283943, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 89%|████████▉ | 463/520 [28:54<03:30, 3.69s/it] {'loss': 1.0819, 'grad_norm': 0.002083094858443529, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 89%|████████▉ | 464/520 [28:58<03:26, 3.68s/it] {'loss': 1.2201, 'grad_norm': 0.0020822276073985658, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 89%|████████▉ | 465/520 [29:02<03:22, 3.69s/it] {'loss': 1.3367, 'grad_norm': 0.002248365400733598, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 90%|████████▉ | 466/520 [29:05<03:19, 3.69s/it] {'loss': 1.2106, 'grad_norm': 0.0017922480590802199, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 90%|████████▉ | 467/520 [29:09<03:15, 3.69s/it] {'loss': 1.2012, 'grad_norm': 0.001865703518887425, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 90%|█████████ | 468/520 [29:13<03:14, 3.74s/it] {'loss': 1.1874, 'grad_norm': 0.0022684195925740374, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 90%|█████████ | 469/520 [29:16<03:10, 3.73s/it] {'loss': 1.2408, 'grad_norm': 0.002167041836427524, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 90%|█████████ | 470/520 [29:20<03:06, 3.72s/it] {'loss': 1.1266, 'grad_norm': 0.001777050597658979, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 91%|█████████ | 471/520 [29:24<03:01, 3.71s/it] {'loss': 1.1377, 'grad_norm': 0.0019849152291859786, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 91%|█████████ | 472/520 [29:28<02:57, 3.71s/it] {'loss': 1.1153, 'grad_norm': 0.0019716543209762163, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 91%|█████████ | 473/520 [29:31<02:54, 3.70s/it] {'loss': 1.1731, 'grad_norm': 0.001967426230200823, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 91%|█████████ | 474/520 [29:35<02:50, 3.71s/it] {'loss': 1.2394, 'grad_norm': 0.0018385166325491193, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 91%|█████████▏| 475/520 [29:39<02:46, 3.70s/it] {'loss': 1.1565, 'grad_norm': 0.0018276674550215294, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 92%|█████████▏| 476/520 [29:42<02:42, 3.70s/it] {'loss': 1.168, 'grad_norm': 0.0019716898451589493, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 92%|█████████▏| 477/520 [29:46<02:38, 3.69s/it] {'loss': 1.1542, 'grad_norm': 0.0021919598792735718, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 92%|█████████▏| 478/520 [29:50<02:35, 3.70s/it] {'loss': 1.1122, 'grad_norm': 0.001954624582568313, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 92%|█████████▏| 479/520 [29:53<02:31, 3.69s/it] {'loss': 1.2116, 'grad_norm': 0.0020114864835869273, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 92%|█████████▏| 480/520 [29:57<02:27, 3.68s/it] {'loss': 1.2316, 'grad_norm': 0.001890466647817883, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 92%|█████████▎| 481/520 [30:01<02:24, 3.70s/it] {'loss': 1.2268, 'grad_norm': 0.0017865356208240561, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 93%|█████████▎| 482/520 [30:04<02:20, 3.69s/it] {'loss': 1.2377, 'grad_norm': 0.0019436979730077092, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 93%|█████████▎| 483/520 [30:08<02:16, 3.69s/it] {'loss': 1.1804, 'grad_norm': 0.002132716779844159, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 93%|█████████▎| 484/520 [30:12<02:13, 3.70s/it] {'loss': 1.1927, 'grad_norm': 0.0020294125955146567, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 93%|█████████▎| 485/520 [30:16<02:09, 3.70s/it] {'loss': 1.1327, 'grad_norm': 0.0019084997214416104, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 93%|█████████▎| 486/520 [30:19<02:05, 3.68s/it] {'loss': 1.2591, 'grad_norm': 0.0020806673997698032, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 94%|█████████▎| 487/520 [30:23<02:01, 3.68s/it] {'loss': 1.1126, 'grad_norm': 0.001899565669908697, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 94%|█████████▍| 488/520 [30:27<01:57, 3.68s/it] {'loss': 1.0542, 'grad_norm': 0.001992852974017282, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 94%|█████████▍| 489/520 [30:30<01:54, 3.68s/it] {'loss': 1.2352, 'grad_norm': 0.0016865159110603975, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 94%|█████████▍| 490/520 [30:34<01:49, 3.66s/it] {'loss': 1.1741, 'grad_norm': 0.0020568036807285645, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 94%|█████████▍| 491/520 [30:38<01:46, 3.67s/it] {'loss': 1.1347, 'grad_norm': 0.0021308434624675416, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 95%|█████████▍| 492/520 [30:41<01:42, 3.67s/it] {'loss': 1.259, 'grad_norm': 0.0021348803682503213, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 95%|█████████▍| 493/520 [30:45<01:39, 3.67s/it] {'loss': 1.2641, 'grad_norm': 0.002034945914025727, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 95%|█████████▌| 494/520 [30:49<01:35, 3.68s/it] {'loss': 1.1914, 'grad_norm': 0.001810155542364804, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 95%|█████████▌| 495/520 [30:52<01:31, 3.68s/it] {'loss': 1.1554, 'grad_norm': 0.001919797447723383, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 95%|█████████▌| 496/520 [30:56<01:28, 3.68s/it] {'loss': 1.0713, 'grad_norm': 0.0020289032150731744, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 96%|█████████▌| 497/520 [31:00<01:24, 3.68s/it] {'loss': 1.1652, 'grad_norm': 0.0017015357938872985, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 96%|█████████▌| 498/520 [31:03<01:20, 3.68s/it] {'loss': 1.1555, 'grad_norm': 0.0022153307156684674, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 96%|█████████▌| 499/520 [31:07<01:17, 3.68s/it] {'loss': 1.3083, 'grad_norm': 0.00196055211883318, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 96%|█████████▌| 500/520 [31:11<01:13, 3.69s/it] {'loss': 1.2684, 'grad_norm': 0.0022576290142794584, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 96%|█████████▋| 501/520 [31:14<01:09, 3.68s/it] {'loss': 1.2186, 'grad_norm': 0.0021983505755826752, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 97%|█████████▋| 502/520 [31:18<01:06, 3.69s/it] {'loss': 1.1967, 'grad_norm': 0.0018814240971714506, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 97%|█████████▋| 503/520 [31:22<01:02, 3.69s/it] {'loss': 1.204, 'grad_norm': 0.0020160684778521864, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 97%|█████████▋| 504/520 [31:25<00:58, 3.69s/it] {'loss': 1.1928, 'grad_norm': 0.002340763660273902, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 97%|█████████▋| 505/520 [31:29<00:55, 3.69s/it] {'loss': 1.2282, 'grad_norm': 0.001967752212805151, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 97%|█████████▋| 506/520 [31:33<00:51, 3.68s/it] {'loss': 1.1483, 'grad_norm': 0.002123363114370274, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 98%|█████████▊| 507/520 [31:37<00:47, 3.69s/it] {'loss': 1.3479, 'grad_norm': 0.0018528967225959346, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 98%|█████████▊| 508/520 [31:40<00:44, 3.69s/it] {'loss': 1.268, 'grad_norm': 0.002018655418260605, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 98%|█████████▊| 509/520 [31:44<00:40, 3.68s/it] {'loss': 1.2313, 'grad_norm': 0.0019410422079743413, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 98%|█████████▊| 510/520 [31:48<00:36, 3.68s/it] {'loss': 1.1808, 'grad_norm': 0.0019184772154520518, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 98%|█████████▊| 511/520 [31:51<00:33, 3.69s/it] {'loss': 1.167, 'grad_norm': 0.001868210937935462, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 98%|█████████▊| 512/520 [31:55<00:29, 3.68s/it] {'loss': 1.0429, 'grad_norm': 0.0020184195087348117, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 99%|█████████▊| 513/520 [31:59<00:25, 3.67s/it] {'loss': 1.247, 'grad_norm': 0.002119084826354095, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 99%|█████████▉| 514/520 [32:02<00:22, 3.70s/it] {'loss': 1.2144, 'grad_norm': 0.0019011792663164368, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 99%|█████████▉| 515/520 [32:06<00:18, 3.69s/it] {'loss': 1.2623, 'grad_norm': 0.0022482602100381125, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:10<00:14, 3.69s/it] {'loss': 1.159, 'grad_norm': 0.0019172918012880986, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:13<00:10, 3.67s/it] {'loss': 1.2691, 'grad_norm': 0.00205576609536345, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 100%|█████████▉| 518/520 [32:17<00:07, 3.64s/it] {'loss': 1.1766, 'grad_norm': 0.0020760298165280787, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 100%|█████████▉| 519/520 [32:21<00:03, 3.65s/it] {'loss': 1.2045, 'grad_norm': 0.0018935537833727, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:25<00:00, 3.93s/it] {'loss': 1.2586, 'grad_norm': 0.00223317854698027, 'learning_rate': 0.0, 'epoch': 1.0}
+ {'train_runtime': 1945.7019, 'train_samples_per_second': 34.193, 'train_steps_per_second': 0.267, 'train_loss': 1.3349474989450896, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:25<00:00, 3.74s/it]
+[2025-10-13 16:21:36,713] [INFO] [launch.py:348:main] Process 868629 exits successfully.
+[2025-10-13 16:21:36,713] [INFO] [launch.py:348:main] Process 868624 exits successfully.
+[2025-10-13 16:21:36,714] [INFO] [launch.py:348:main] Process 868626 exits successfully.
+[2025-10-13 16:21:36,714] [INFO] [launch.py:348:main] Process 868625 exits successfully.
+[2025-10-13 16:21:37,715] [INFO] [launch.py:348:main] Process 868627 exits successfully.
+[2025-10-13 16:21:37,716] [INFO] [launch.py:348:main] Process 868628 exits successfully.
+[2025-10-13 16:21:37,716] [INFO] [launch.py:348:main] Process 868623 exits successfully.
+[2025-10-13 16:21:40,719] [INFO] [launch.py:348:main] Process 868622 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_2.9_2e-1_connector-3.0_2.9_2e-1_ablation_20251013_154739.log
+Timestamp: 2025-10-13 16:21:43
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation_20251013_162143.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation_20251013_162143.log
new file mode 100644
index 0000000000000000000000000000000000000000..85b9a9d2b03df0e4cc6dbce9d6be39a2f83badb9
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation_20251013_162143.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation_20251013_162143.log
+Timestamp: 2025-10-13 16:21:43
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 16:21:45,836] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:48,521] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 16:21:48,523] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 0.7 --temperature_mlp_text 0.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 0.7 --temperature_mlp_vision 0.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 0.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
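Editor's note: the `--world_info` blob that the runner passes to `deepspeed.launcher.launch` above is just base64-encoded JSON; decoding it reproduces the WORLD INFO DICT printed a few lines below. A quick standard-library check:

```python
import base64
import json

blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(blob)))
# -> {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}: one node ("localhost") with ranks 0-7,
#    matching dist_world_size=8 and CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 below.
```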
+ import pynvml # type: ignore[import] +[2025-10-13 16:21:51,103] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:21:52,131] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 16:21:52,131] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 16:21:52,132] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 16:21:52,132] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 16:21:52,132] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 16:21:52,132] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 16:21:52,132] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 16:21:52,134] [INFO] [launch.py:253:main] process 888519 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:21:52,136] [INFO] 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 16:21:58,980] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,014] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,089] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,165] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,239] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,239] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,239] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,247] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:21:59,395] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,423] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,499] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,573] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,648] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,648] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,650] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,660] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 16:21:59,660] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.7, 'temperature_mlp': 0.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.7, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.7,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.7,
+    "temperature_mlp": 0.7,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
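The config above also fixes the connector geometry: connector_type mlp2x_gelu with vision_hidden_size 1152 and hidden_size 896 is, in LLaVA-style models, a two-layer GELU MLP that projects SigLIP patch features into the LLM embedding space. A minimal sketch under that assumption (hypothetical layout, not the repository's implementation):

import torch
import torch.nn as nn

# Assumed mlp2x_gelu connector: Linear -> GELU -> Linear, mapping 1152-dim
# SigLIP features to the 896-dim Qwen2.5-0.5B hidden size.
connector = nn.Sequential(
    nn.Linear(1152, 896),
    nn.GELU(),
    nn.Linear(896, 896),
)
patches = torch.randn(1, 729, 1152)  # 729 = 27*27 patches at image size 384, patch size 14
tokens = connector(patches)          # (1, 729, 896), fed to the LLM alongside text embeddings

With subnet_type_connector "global" and mask_type_connector "soft", the same sigmoid-style gating sketched earlier would presumably apply to these two linear layers as well, here at temperature 0.7.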
+ywang29-vrdb-test1-worker-0:888519:888519 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:888519:888519 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:888519:888519 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:888519:888519 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:888519:888519 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:888519:888519 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO ncclCommInitRank comm 0x56116496a7d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO ncclCommInitRank comm 0x55981579fcf0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO ncclCommInitRank comm 0x5597ac83c7c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO ncclCommInitRank comm 0x55e13c477be0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO ncclCommInitRank comm 0x562d6aae7d60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO ncclCommInitRank comm 0x55e1bf7b4410 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO ncclCommInitRank comm 0x563e46dc5e50 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO ncclCommInitRank comm 0x55d329519c80 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x29610e14e0713c7c - Init START
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO comm 0x55e1bf7b4410 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO 24 coll channels, 24 collnet 
channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888525:890181 [6] NCCL INFO ncclCommInitRank comm 0x55e13c477be0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888526:890183 [7] NCCL INFO ncclCommInitRank comm 0x56116496a7d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888524:890182 [5] NCCL INFO ncclCommInitRank comm 0x55d329519c80 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888522:890186 [3] NCCL INFO ncclCommInitRank comm 0x55981579fcf0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888520:890184 [1] NCCL INFO ncclCommInitRank comm 0x5597ac83c7c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888519:890180 [0] NCCL INFO ncclCommInitRank comm 0x55e1bf7b4410 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:888521:890185 [2] NCCL INFO ncclCommInitRank comm 0x562d6aae7d60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x29610e14e0713c7c - Init COMPLETE +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:888523:890187 [4] NCCL INFO ncclCommInitRank comm 0x563e46dc5e50 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x29610e14e0713c7c - Init COMPLETE +[2025-10-13 16:22:40,267] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 
'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[the above "Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint ... and are newly initialized" warning, its full list of `.scores` parameters, and the "You should probably TRAIN this model ..." notice are repeated verbatim 6 more times, once per additional worker process]
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[last message repeated 7 times]
+[2025-10-13 16:22:42,073] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+[last message repeated 7 times]
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+[last message repeated 7 times]
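+[note: the repeated warning is expected for this masktune setup. The pretrain checkpoint stores only the dense weight/bias tensors, while the masked model classes add one `scores` tensor per projection, so every `scores` parameter is necessarily "newly initialized". A minimal, self-contained sketch of the mechanism, with a hypothetical class name standing in for the repo's actual implementation:]
+
+import torch
+import torch.nn as nn
+
+class MaskedLinear(nn.Linear):
+    # Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal:
+    # a plain nn.Linear plus one learnable importance score per weight.
+    def __init__(self, in_features, out_features, bias=True):
+        super().__init__(in_features, out_features, bias=bias)
+        self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
+
+# A dense checkpoint has no "scores" entry, so loading it non-strictly
+# leaves `scores` at its fresh initialization -- the same situation the
+# Transformers warning above reports for every layer of the LLM.
+dense = nn.Linear(4, 4)
+masked = MaskedLinear(4, 4)
+print(masked.load_state_dict(dense.state_dict(), strict=False).missing_keys)
+# -> ['scores']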
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
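+[note: the dump above shows every attention/MLP projection and both connector layers replaced by SupermaskLinearSparsity_SoftForward_Normal modules, consistent with the config's soft mask type and temperature 1.3. The exact semantics live in the repo; a plausible minimal sketch of a soft-forward masked linear, under those assumptions:]
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class SoftMaskedLinear(nn.Linear):
+    # Hypothetical sketch: the weight is scaled elementwise by
+    # sigmoid(scores / temperature) in the forward pass, so the mask is
+    # soft (values in (0, 1)) and gradients flow to the scores normally.
+    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
+        super().__init__(in_features, out_features, bias=bias)
+        self.temperature = temperature
+        self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
+
+    def forward(self, x):
+        mask = torch.sigmoid(self.scores / self.temperature)
+        return F.linear(x, self.weight * mask, self.bias)
+
+layer = SoftMaskedLinear(896, 896)
+y = layer(torch.randn(2, 896))
+# With scores = 5.0 and T = 1.3, sigmoid(5.0 / 1.3) ~= 0.98, so the layer
+# starts close to its dense behaviour and training can prune it softly.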
+Pre-training init language_model.model.layers.N.self_attn.{q,k,v,o}_proj.scores: Mean=5.000000   (for every layer N = 0..23)
+Pre-training init language_model.model.layers.N.mlp.{gate,up,down}_proj.scores: Mean=5.000000   (for every layer N = 0..23)
+[168 per-tensor init lines collapsed: all 24 x 7 language-model score tensors report Mean=5.000000]
language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000 +Pre-training init connector._connector.0.scores: Mean=5.000005 +Pre-training init connector._connector.2.scores: Mean=4.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 16:23:09,602 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 16:23:09,607 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 
4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters 
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:002->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO 24 coll channels, 24 collnet 
channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:888526:895245 [7] NCCL INFO ncclCommInitRank comm 0x7f32f806a540 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888523:895249 [4] NCCL INFO ncclCommInitRank comm 0x7fb8e006ac00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888525:895244 [6] NCCL INFO ncclCommInitRank comm 0x7fe02006aa10 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888519:895243 [0] NCCL INFO ncclCommInitRank comm 0x7f715c06ac10 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888522:895250 [3] NCCL INFO ncclCommInitRank comm 0x7f506006a790 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888521:895246 [2] NCCL INFO ncclCommInitRank comm 0x7ff01c06a8f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888520:895248 [1] NCCL INFO ncclCommInitRank comm 0x7f320006abd0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x182470afa58f1850 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:888524:895247 [5] NCCL INFO ncclCommInitRank comm 0x7fae2006aa60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x182470afa58f1850 - Init COMPLETE
+ 0%| | 1/520 [00:14<2:03:31, 14.28s/it] {'loss': 2.0497, 'grad_norm': 0.0, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:09:11, 8.01s/it] {'loss': 2.06, 'grad_norm': 0.0, 'learning_rate': 0.025, 'epoch': 0.0}
+ 1%| | 3/520 [00:21<51:47, 6.01s/it] {'loss': 2.1958, 'grad_norm': 0.0, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<43:36, 5.07s/it] {'loss': 2.0688, 'grad_norm': 0.0, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:08, 4.56s/it] {'loss': 2.2403, 'grad_norm': 0.0, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:24, 4.25s/it] {'loss': 1.6782, 'grad_norm': 0.0, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:36<34:39, 4.05s/it] {'loss': 2.0829, 'grad_norm': 0.0, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:40<35:09, 4.12s/it] {'loss': 2.0585, 'grad_norm': 0.0, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<33:50, 3.97s/it] {'loss': 2.1936, 'grad_norm': 0.0, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:47<32:54, 3.87s/it] {'loss': 2.0887, 'grad_norm': 0.0, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:51<32:38, 3.85s/it] {'loss': 2.0637, 'grad_norm': 0.0, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:55<32:05, 3.79s/it] {'loss': 1.8848, 'grad_norm': 0.0, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-13 16:24:14,683] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
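The DeepSpeed warning above is actionable: it reports that the PyTorch CUDA caching allocator had to flush under memory pressure, and it suggests calling get_accelerator().empty_cache() at the same point on every rank. A minimal sketch of that suggestion follows; model_engine, train_loader, and FLUSH_EVERY are illustrative placeholders, not objects taken from this run.

    from deepspeed.accelerator import get_accelerator

    FLUSH_EVERY = 50  # hypothetical interval; tune to how often the warning fires

    for step, batch in enumerate(train_loader):   # train_loader: placeholder dataloader
        loss = model_engine(batch)                # model_engine: placeholder DeepSpeed engine
        model_engine.backward(loss)
        model_engine.step()                       # stage3.py emits the warning during step()
        if step % FLUSH_EVERY == 0:
            # Every rank reaches this line at the same step, so all ranks flush
            # their allocator caches together, which is what the warning asks for.
            get_accelerator().empty_cache()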
+ 2%|▎ | 13/520 [00:59<33:25, 3.96s/it] {'loss': 2.0728, 'grad_norm': 0.0, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:03<32:34, 3.86s/it] {'loss': 2.1118, 'grad_norm': 0.0, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:06<31:56, 3.79s/it] {'loss': 1.7478, 'grad_norm': 0.0, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:10<31:27, 3.74s/it] {'loss': 1.8954, 'grad_norm': 0.0, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:13<31:04, 3.71s/it] {'loss': 2.1158, 'grad_norm': 0.0, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:17<30:46, 3.68s/it] {'loss': 2.1718, 'grad_norm': 0.0, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:21<30:37, 3.67s/it] {'loss': 1.8467, 'grad_norm': 0.0, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:24<30:28, 3.66s/it] {'loss': 2.2091, 'grad_norm': 0.0, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:28<30:36, 3.68s/it] {'loss': 2.0718, 'grad_norm': 0.0, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:32<30:50, 3.72s/it] {'loss': 2.0488, 'grad_norm': 0.0, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:36<30:41, 3.71s/it] {'loss': 2.0811, 'grad_norm': 0.0, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:39<30:28, 3.69s/it] {'loss': 1.8639, 'grad_norm': 0.0, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:43<30:16, 3.67s/it] {'loss': 2.2763, 'grad_norm': 0.0, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:47<30:07, 3.66s/it] {'loss': 1.9761, 'grad_norm': 0.0, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:50<29:58, 3.65s/it] {'loss': 2.055, 'grad_norm': 0.0, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:54<29:56, 3.65s/it] {'loss': 2.2197, 'grad_norm': 0.0, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 6%|▌ | 29/520 [01:57<29:51, 3.65s/it] {'loss': 2.145, 'grad_norm': 0.0, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:01<29:51, 3.66s/it] {'loss': 1.9357, 'grad_norm': 0.0, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:05<29:51, 3.66s/it] {'loss': 1.9528, 'grad_norm': 0.0, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:08<29:49, 3.67s/it] {'loss': 1.6773, 'grad_norm': 0.0,
'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:08<29:49, 3.67s/it] 6%|▋ | 33/520 [02:12<29:50, 3.68s/it] {'loss': 2.148, 'grad_norm': 0.0, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:12<29:50, 3.68s/it] 7%|▋ | 34/520 [02:16<29:40, 3.66s/it] {'loss': 2.2337, 'grad_norm': 0.0, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:16<29:40, 3.66s/it] 7%|▋ | 35/520 [02:19<29:39, 3.67s/it] {'loss': 2.1437, 'grad_norm': 0.0, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:19<29:39, 3.67s/it] 7%|▋ | 36/520 [02:23<29:31, 3.66s/it] {'loss': 2.0502, 'grad_norm': 0.0, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:23<29:31, 3.66s/it] 7%|▋ | 37/520 [02:27<29:33, 3.67s/it] {'loss': 1.911, 'grad_norm': 0.0, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:27<29:33, 3.67s/it] 7%|▋ | 38/520 [02:31<29:38, 3.69s/it] {'loss': 2.0439, 'grad_norm': 0.0, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:31<29:38, 3.69s/it] 8%|▊ | 39/520 [02:34<29:25, 3.67s/it] {'loss': 2.2716, 'grad_norm': 0.0, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:34<29:25, 3.67s/it] 8%|▊ | 40/520 [02:38<29:25, 3.68s/it] {'loss': 2.0359, 'grad_norm': 0.0, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:38<29:25, 3.68s/it] 8%|▊ | 41/520 [02:42<29:18, 3.67s/it] {'loss': 2.1421, 'grad_norm': 0.0, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:42<29:18, 3.67s/it] 8%|▊ | 42/520 [02:45<29:10, 3.66s/it] {'loss': 2.3152, 'grad_norm': 0.0, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:45<29:10, 3.66s/it] 8%|▊ | 43/520 [02:49<29:24, 3.70s/it] {'loss': 1.9193, 'grad_norm': 0.0, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:49<29:24, 3.70s/it] 8%|▊ | 44/520 [02:53<29:30, 3.72s/it] {'loss': 1.9613, 'grad_norm': 0.0, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:53<29:30, 3.72s/it] 9%|▊ | 45/520 [02:56<29:24, 3.71s/it] {'loss': 2.2185, 'grad_norm': 0.0, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:56<29:24, 3.71s/it] 9%|▉ | 46/520 [03:00<29:12, 3.70s/it] {'loss': 1.9333, 'grad_norm': 0.0, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:00<29:12, 3.70s/it] 9%|▉ | 47/520 [03:04<29:05, 3.69s/it] {'loss': 2.1175, 'grad_norm': 0.0, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:04<29:05, 3.69s/it] 9%|▉ | 48/520 [03:07<28:58, 3.68s/it] {'loss': 2.2907, 'grad_norm': 0.0, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:07<28:58, 3.68s/it] 9%|▉ | 49/520 [03:11<28:51, 3.68s/it] {'loss': 2.1249, 'grad_norm': 0.0, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:11<28:51, 3.68s/it] 10%|▉ | 50/520 [03:15<28:40, 3.66s/it] {'loss': 2.1602, 'grad_norm': 0.0, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:15<28:40, 3.66s/it] 10%|▉ | 51/520 [03:18<28:36, 3.66s/it] {'loss': 2.2575, 'grad_norm': 0.0, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:18<28:36, 3.66s/it] 10%|█ | 52/520 [03:22<28:30, 3.65s/it] {'loss': 2.2524, 'grad_norm': 0.0, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:22<28:30, 3.65s/it] 10%|█ | 53/520 [03:26<28:26, 3.65s/it] {'loss': 2.0526, 'grad_norm': 0.0, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 
[03:26<28:26, 3.65s/it] 10%|█ | 54/520 [03:29<28:22, 3.65s/it] {'loss': 2.1465, 'grad_norm': 0.0, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:29<28:22, 3.65s/it] 11%|█ | 55/520 [03:33<28:21, 3.66s/it] {'loss': 2.2296, 'grad_norm': 0.0, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:33<28:21, 3.66s/it] 11%|█ | 56/520 [03:37<28:19, 3.66s/it] {'loss': 2.1652, 'grad_norm': 0.0, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:37<28:19, 3.66s/it] 11%|█ | 57/520 [03:40<28:13, 3.66s/it] {'loss': 2.1812, 'grad_norm': 0.0, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:40<28:13, 3.66s/it] 11%|█ | 58/520 [03:44<28:10, 3.66s/it] {'loss': 1.9834, 'grad_norm': 0.0, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:44<28:10, 3.66s/it] 11%|█▏ | 59/520 [03:48<28:04, 3.65s/it] {'loss': 1.8767, 'grad_norm': 0.0, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:48<28:04, 3.65s/it] 12%|█▏ | 60/520 [03:51<28:04, 3.66s/it] {'loss': 2.072, 'grad_norm': 0.0, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:51<28:04, 3.66s/it] 12%|█▏ | 61/520 [03:55<27:58, 3.66s/it] {'loss': 1.939, 'grad_norm': 0.0, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:55<27:58, 3.66s/it] 12%|█▏ | 62/520 [03:59<27:50, 3.65s/it] {'loss': 2.1336, 'grad_norm': 0.0, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [03:59<27:50, 3.65s/it] 12%|█▏ | 63/520 [04:02<27:44, 3.64s/it] {'loss': 2.0545, 'grad_norm': 0.0, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:02<27:44, 3.64s/it] 12%|█▏ | 64/520 [04:06<27:38, 3.64s/it] {'loss': 2.096, 'grad_norm': 0.0, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:06<27:38, 3.64s/it] 12%|█▎ | 65/520 [04:09<27:37, 3.64s/it] {'loss': 2.2915, 'grad_norm': 0.0, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:09<27:37, 3.64s/it] 13%|█▎ | 66/520 [04:13<27:28, 3.63s/it] {'loss': 2.0169, 'grad_norm': 0.0, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:13<27:28, 3.63s/it] 13%|█▎ | 67/520 [04:17<27:20, 3.62s/it] {'loss': 2.15, 'grad_norm': 0.0, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:17<27:20, 3.62s/it] 13%|█▎ | 68/520 [04:20<27:13, 3.61s/it] {'loss': 2.0439, 'grad_norm': 0.0, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:20<27:13, 3.61s/it] 13%|█▎ | 69/520 [04:24<27:08, 3.61s/it] {'loss': 2.2354, 'grad_norm': 0.0, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:24<27:08, 3.61s/it] 13%|█▎ | 70/520 [04:27<27:02, 3.60s/it] {'loss': 2.0978, 'grad_norm': 0.0, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:27<27:02, 3.60s/it] 14%|█▎ | 71/520 [04:31<27:01, 3.61s/it] {'loss': 2.093, 'grad_norm': 0.0, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:31<27:01, 3.61s/it] 14%|█▍ | 72/520 [04:35<26:56, 3.61s/it] {'loss': 2.188, 'grad_norm': 0.0, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:35<26:56, 3.61s/it] 14%|█▍ | 73/520 [04:38<26:56, 3.62s/it] {'loss': 2.076, 'grad_norm': 0.0, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:38<26:56, 3.62s/it] 14%|█▍ | 74/520 [04:42<26:52, 3.62s/it] {'loss': 2.1694, 'grad_norm': 0.0, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 
[04:42<26:52, 3.62s/it] 14%|█▍ | 75/520 [04:46<26:43, 3.60s/it] {'loss': 2.015, 'grad_norm': 0.0, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:46<26:43, 3.60s/it] 15%|█▍ | 76/520 [04:49<26:41, 3.61s/it] {'loss': 1.7991, 'grad_norm': 0.0, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:49<26:41, 3.61s/it] 15%|█▍ | 77/520 [04:53<26:51, 3.64s/it] {'loss': 2.2933, 'grad_norm': 0.0, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:53<26:51, 3.64s/it] 15%|█▌ | 78/520 [04:57<27:15, 3.70s/it] {'loss': 2.065, 'grad_norm': 0.0, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [04:57<27:15, 3.70s/it] 15%|█▌ | 79/520 [05:00<27:23, 3.73s/it] {'loss': 1.9893, 'grad_norm': 0.0, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:00<27:23, 3.73s/it] 15%|█▌ | 80/520 [05:04<27:28, 3.75s/it] {'loss': 1.9708, 'grad_norm': 0.0, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:04<27:28, 3.75s/it] 16%|█▌ | 81/520 [05:08<27:34, 3.77s/it] {'loss': 2.3674, 'grad_norm': 0.0, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:08<27:34, 3.77s/it] 16%|█▌ | 82/520 [05:12<27:43, 3.80s/it] {'loss': 2.1439, 'grad_norm': 0.0, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:12<27:43, 3.80s/it] 16%|█▌ | 83/520 [05:16<27:35, 3.79s/it] {'loss': 2.1668, 'grad_norm': 0.0, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:16<27:35, 3.79s/it] 16%|█▌ | 84/520 [05:19<27:29, 3.78s/it] {'loss': 2.163, 'grad_norm': 0.0, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:19<27:29, 3.78s/it] 16%|█▋ | 85/520 [05:23<27:26, 3.78s/it] {'loss': 2.1234, 'grad_norm': 0.0, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:23<27:26, 3.78s/it] 17%|█▋ | 86/520 [05:27<27:28, 3.80s/it] {'loss': 2.0339, 'grad_norm': 0.0, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:27<27:28, 3.80s/it] 17%|█▋ | 87/520 [05:31<27:26, 3.80s/it] {'loss': 2.0185, 'grad_norm': 0.0, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:31<27:26, 3.80s/it] 17%|█▋ | 88/520 [05:35<27:26, 3.81s/it] {'loss': 1.5804, 'grad_norm': 0.0, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:35<27:26, 3.81s/it] 17%|█▋ | 89/520 [05:38<27:15, 3.79s/it] {'loss': 2.1898, 'grad_norm': 0.0, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:38<27:15, 3.79s/it] 17%|█▋ | 90/520 [05:42<26:53, 3.75s/it] {'loss': 2.1876, 'grad_norm': 0.0, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:42<26:53, 3.75s/it] 18%|█▊ | 91/520 [05:46<26:32, 3.71s/it] {'loss': 1.9945, 'grad_norm': 0.0, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:46<26:32, 3.71s/it] 18%|█▊ | 92/520 [05:49<26:22, 3.70s/it] {'loss': 2.0336, 'grad_norm': 0.0, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:49<26:22, 3.70s/it] 18%|█▊ | 93/520 [05:53<26:09, 3.67s/it] {'loss': 2.2198, 'grad_norm': 0.0, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:53<26:09, 3.67s/it] 18%|█▊ | 94/520 [05:57<25:59, 3.66s/it] {'loss': 2.091, 'grad_norm': 0.0, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [05:57<25:59, 3.66s/it] 18%|█▊ | 95/520 [06:00<25:52, 3.65s/it] {'loss': 2.276, 'grad_norm': 0.0, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 
95/520 [06:00<25:52, 3.65s/it] 18%|█▊ | 96/520 [06:04<25:45, 3.64s/it] {'loss': 2.0205, 'grad_norm': 0.0, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:04<25:45, 3.64s/it] 19%|█▊ | 97/520 [06:08<25:38, 3.64s/it] {'loss': 2.3655, 'grad_norm': 0.0, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:08<25:38, 3.64s/it] 19%|█▉ | 98/520 [06:11<25:35, 3.64s/it] {'loss': 1.9072, 'grad_norm': 0.0, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:11<25:35, 3.64s/it] 19%|█▉ | 99/520 [06:15<25:29, 3.63s/it] {'loss': 2.1488, 'grad_norm': 0.0, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:15<25:29, 3.63s/it] 19%|█▉ | 100/520 [06:18<25:25, 3.63s/it] {'loss': 1.9139, 'grad_norm': 0.0, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:18<25:25, 3.63s/it] 19%|█▉ | 101/520 [06:22<25:27, 3.65s/it] {'loss': 2.1148, 'grad_norm': 0.0, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:22<25:27, 3.65s/it] 20%|█▉ | 102/520 [06:26<25:19, 3.64s/it] {'loss': 2.3567, 'grad_norm': 0.0, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:26<25:19, 3.64s/it] 20%|█▉ | 103/520 [06:29<25:12, 3.63s/it] {'loss': 1.9617, 'grad_norm': 0.0, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:29<25:12, 3.63s/it] 20%|██ | 104/520 [06:33<25:17, 3.65s/it] {'loss': 2.2072, 'grad_norm': 0.0, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:33<25:17, 3.65s/it] 20%|██ | 105/520 [06:37<25:14, 3.65s/it] {'loss': 2.068, 'grad_norm': 0.0, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:37<25:14, 3.65s/it] 20%|██ | 106/520 [06:41<25:32, 3.70s/it] {'loss': 1.7787, 'grad_norm': 0.0, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:41<25:32, 3.70s/it] 21%|██ | 107/520 [06:44<25:51, 3.76s/it] {'loss': 1.8298, 'grad_norm': 0.0, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:44<25:51, 3.76s/it] 21%|██ | 108/520 [06:48<26:03, 3.80s/it] {'loss': 2.1999, 'grad_norm': 0.0, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:48<26:03, 3.80s/it] 21%|██ | 109/520 [06:52<25:43, 3.75s/it] {'loss': 1.9115, 'grad_norm': 0.0, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:52<25:43, 3.75s/it] 21%|██ | 110/520 [06:56<25:25, 3.72s/it] {'loss': 2.1085, 'grad_norm': 0.0, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [06:56<25:25, 3.72s/it] 21%|██▏ | 111/520 [06:59<25:15, 3.71s/it] {'loss': 2.2082, 'grad_norm': 0.0, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [06:59<25:15, 3.71s/it] 22%|██▏ | 112/520 [07:03<25:03, 3.69s/it] {'loss': 1.9692, 'grad_norm': 0.0, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:03<25:03, 3.69s/it] 22%|██▏ | 113/520 [07:07<24:51, 3.67s/it] {'loss': 2.0226, 'grad_norm': 0.0, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:07<24:51, 3.67s/it] 22%|██▏ | 114/520 [07:10<24:51, 3.67s/it] {'loss': 2.1343, 'grad_norm': 0.0, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:10<24:51, 3.67s/it] 22%|██▏ | 115/520 [07:14<24:45, 3.67s/it] {'loss': 2.085, 'grad_norm': 0.0, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:14<24:45, 3.67s/it] 22%|██▏ | 116/520 [07:18<24:40, 3.67s/it] {'loss': 2.0739, 'grad_norm': 0.0, 
'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:18<24:40, 3.67s/it] 22%|██▎ | 117/520 [07:21<24:33, 3.66s/it] {'loss': 2.1182, 'grad_norm': 0.0, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:21<24:33, 3.66s/it] 23%|██▎ | 118/520 [07:25<24:26, 3.65s/it] {'loss': 2.0715, 'grad_norm': 0.0, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:25<24:26, 3.65s/it] 23%|██▎ | 119/520 [07:28<24:22, 3.65s/it] {'loss': 2.0071, 'grad_norm': 0.0, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:28<24:22, 3.65s/it] 23%|██▎ | 120/520 [07:32<24:24, 3.66s/it] {'loss': 2.2848, 'grad_norm': 0.0, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:32<24:24, 3.66s/it] 23%|██▎ | 121/520 [07:36<24:18, 3.66s/it] {'loss': 2.0958, 'grad_norm': 0.0, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:36<24:18, 3.66s/it] 23%|██▎ | 122/520 [07:39<24:13, 3.65s/it] {'loss': 2.0953, 'grad_norm': 0.0, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:39<24:13, 3.65s/it] 24%|██▎ | 123/520 [07:43<24:09, 3.65s/it] {'loss': 1.7717, 'grad_norm': 0.0, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:43<24:09, 3.65s/it] 24%|██▍ | 124/520 [07:47<24:03, 3.64s/it] {'loss': 2.236, 'grad_norm': 0.0, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:47<24:03, 3.64s/it] 24%|██▍ | 125/520 [07:50<24:00, 3.65s/it] {'loss': 2.1224, 'grad_norm': 0.0, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:50<24:00, 3.65s/it] 24%|██▍ | 126/520 [07:55<25:16, 3.85s/it] {'loss': 1.7811, 'grad_norm': 0.0, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:55<25:16, 3.85s/it] 24%|██▍ | 127/520 [07:58<25:06, 3.83s/it] {'loss': 2.2468, 'grad_norm': 0.0, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [07:58<25:06, 3.83s/it] 25%|██▍ | 128/520 [08:02<25:00, 3.83s/it] {'loss': 2.138, 'grad_norm': 0.0, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:02<25:00, 3.83s/it] 25%|██▍ | 129/520 [08:06<24:55, 3.82s/it] {'loss': 2.012, 'grad_norm': 0.0, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:06<24:55, 3.82s/it] 25%|██▌ | 130/520 [08:10<24:54, 3.83s/it] {'loss': 2.0714, 'grad_norm': 0.0, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:10<24:54, 3.83s/it] 25%|██▌ | 131/520 [08:14<24:52, 3.84s/it] {'loss': 1.7064, 'grad_norm': 0.0, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:14<24:52, 3.84s/it] 25%|██▌ | 132/520 [08:18<24:46, 3.83s/it] {'loss': 2.2907, 'grad_norm': 0.0, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:18<24:46, 3.83s/it] 26%|██▌ | 133/520 [08:21<24:46, 3.84s/it] {'loss': 2.2873, 'grad_norm': 0.0, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:21<24:46, 3.84s/it] 26%|██▌ | 134/520 [08:25<24:39, 3.83s/it] {'loss': 2.1675, 'grad_norm': 0.0, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:25<24:39, 3.83s/it] 26%|██▌ | 135/520 [08:29<24:39, 3.84s/it] {'loss': 2.184, 'grad_norm': 0.0, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:29<24:39, 3.84s/it] 26%|██▌ | 136/520 [08:33<24:35, 3.84s/it] {'loss': 2.109, 'grad_norm': 0.0, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:33<24:35, 
3.84s/it] 26%|██▋ | 137/520 [08:37<24:29, 3.84s/it] {'loss': 2.2285, 'grad_norm': 0.0, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:37<24:29, 3.84s/it] 27%|██▋ | 138/520 [08:41<24:31, 3.85s/it] {'loss': 2.0231, 'grad_norm': 0.0, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:41<24:31, 3.85s/it] 27%|██▋ | 139/520 [08:45<24:26, 3.85s/it] {'loss': 1.779, 'grad_norm': 0.0, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:45<24:26, 3.85s/it] 27%|██▋ | 140/520 [08:48<24:26, 3.86s/it] {'loss': 1.8436, 'grad_norm': 0.0, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:48<24:26, 3.86s/it] 27%|██▋ | 141/520 [08:52<24:16, 3.84s/it] {'loss': 1.9943, 'grad_norm': 0.0, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:52<24:16, 3.84s/it] 27%|██▋ | 142/520 [08:56<24:11, 3.84s/it] {'loss': 1.7887, 'grad_norm': 0.0, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:56<24:11, 3.84s/it] 28%|██▊ | 143/520 [09:00<24:11, 3.85s/it] {'loss': 2.1754, 'grad_norm': 0.0, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:00<24:11, 3.85s/it] 28%|██▊ | 144/520 [09:04<24:07, 3.85s/it] {'loss': 2.2613, 'grad_norm': 0.0, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:04<24:07, 3.85s/it] 28%|██▊ | 145/520 [09:08<24:04, 3.85s/it] {'loss': 2.0696, 'grad_norm': 0.0, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:08<24:04, 3.85s/it] 28%|██▊ | 146/520 [09:11<23:59, 3.85s/it] {'loss': 1.935, 'grad_norm': 0.0, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:11<23:59, 3.85s/it] 28%|██▊ | 147/520 [09:15<23:54, 3.85s/it] {'loss': 2.0573, 'grad_norm': 0.0, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:15<23:54, 3.85s/it] 28%|██▊ | 148/520 [09:19<23:52, 3.85s/it] {'loss': 2.0705, 'grad_norm': 0.0, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:19<23:52, 3.85s/it] 29%|██▊ | 149/520 [09:23<23:46, 3.85s/it] {'loss': 2.1033, 'grad_norm': 0.0, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:23<23:46, 3.85s/it] 29%|██▉ | 150/520 [09:27<23:40, 3.84s/it] {'loss': 2.1162, 'grad_norm': 0.0, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:27<23:40, 3.84s/it] 29%|██▉ | 151/520 [09:31<23:38, 3.84s/it] {'loss': 2.1368, 'grad_norm': 0.0, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:31<23:38, 3.84s/it] 29%|██▉ | 152/520 [09:35<23:36, 3.85s/it] {'loss': 2.2102, 'grad_norm': 0.0, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:35<23:36, 3.85s/it] 29%|██▉ | 153/520 [09:38<23:31, 3.85s/it] {'loss': 2.069, 'grad_norm': 0.0, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:38<23:31, 3.85s/it] 30%|██▉ | 154/520 [09:42<23:26, 3.84s/it] {'loss': 2.0422, 'grad_norm': 0.0, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:42<23:26, 3.84s/it] 30%|██▉ | 155/520 [09:46<23:21, 3.84s/it] {'loss': 2.2418, 'grad_norm': 0.0, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:46<23:21, 3.84s/it] 30%|███ | 156/520 [09:50<23:19, 3.84s/it] {'loss': 2.1952, 'grad_norm': 0.0, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:50<23:19, 3.84s/it] 30%|███ | 157/520 [09:54<23:13, 3.84s/it] {'loss': 1.9614, 'grad_norm': 
0.0, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:54<23:13, 3.84s/it] 30%|███ | 158/520 [09:58<23:07, 3.83s/it] {'loss': 2.0753, 'grad_norm': 0.0, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:58<23:07, 3.83s/it] 31%|███ | 159/520 [10:01<23:06, 3.84s/it] {'loss': 2.1432, 'grad_norm': 0.0, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:01<23:06, 3.84s/it] 31%|███ | 160/520 [10:05<23:03, 3.84s/it] {'loss': 2.1456, 'grad_norm': 0.0, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:05<23:03, 3.84s/it] 31%|███ | 161/520 [10:09<22:58, 3.84s/it] {'loss': 2.152, 'grad_norm': 0.0, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:09<22:58, 3.84s/it] 31%|███ | 162/520 [10:13<22:55, 3.84s/it] {'loss': 1.8926, 'grad_norm': 0.0, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:13<22:55, 3.84s/it] 31%|███▏ | 163/520 [10:17<22:48, 3.83s/it] {'loss': 2.2871, 'grad_norm': 0.0, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:17<22:48, 3.83s/it] 32%|███▏ | 164/520 [10:21<22:59, 3.87s/it] {'loss': 2.1267, 'grad_norm': 0.0, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:21<22:59, 3.87s/it] 32%|███▏ | 165/520 [10:25<23:12, 3.92s/it] {'loss': 2.1168, 'grad_norm': 0.0, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:25<23:12, 3.92s/it] 32%|███▏ | 166/520 [10:29<23:19, 3.95s/it] {'loss': 2.1565, 'grad_norm': 0.0, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:29<23:19, 3.95s/it] 32%|███▏ | 167/520 [10:33<23:25, 3.98s/it] {'loss': 2.1271, 'grad_norm': 0.0, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:33<23:25, 3.98s/it] 32%|███▏ | 168/520 [10:37<23:27, 4.00s/it] {'loss': 1.9889, 'grad_norm': 0.0, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:37<23:27, 4.00s/it] 32%|███▎ | 169/520 [10:41<23:25, 4.01s/it] {'loss': 2.1528, 'grad_norm': 0.0, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:41<23:25, 4.01s/it] 33%|███▎ | 170/520 [10:45<23:25, 4.02s/it] {'loss': 1.809, 'grad_norm': 0.0, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:45<23:25, 4.02s/it] 33%|███▎ | 171/520 [10:49<23:13, 3.99s/it] {'loss': 2.2505, 'grad_norm': 0.0, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:49<23:13, 3.99s/it] 33%|███▎ | 172/520 [10:53<22:52, 3.94s/it] {'loss': 2.0153, 'grad_norm': 0.0, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:53<22:52, 3.94s/it] 33%|███▎ | 173/520 [10:57<22:35, 3.91s/it] {'loss': 2.1214, 'grad_norm': 0.0, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:57<22:35, 3.91s/it] 33%|███▎ | 174/520 [11:00<22:27, 3.89s/it] {'loss': 2.1899, 'grad_norm': 0.0, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:00<22:27, 3.89s/it] 34%|███▎ | 175/520 [11:04<22:17, 3.88s/it] {'loss': 2.0477, 'grad_norm': 0.0, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:04<22:17, 3.88s/it] 34%|███▍ | 176/520 [11:08<22:10, 3.87s/it] {'loss': 1.9286, 'grad_norm': 0.0, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:08<22:10, 3.87s/it] 34%|███▍ | 177/520 [11:12<22:02, 3.85s/it] {'loss': 1.9234, 'grad_norm': 0.0, 'learning_rate': 0.1537299608346824, 'epoch': 
0.34} + 34%|███▍ | 177/520 [11:12<22:02, 3.85s/it] 34%|███▍ | 178/520 [11:16<21:55, 3.85s/it] {'loss': 2.1311, 'grad_norm': 0.0, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:16<21:55, 3.85s/it] 34%|███▍ | 179/520 [11:20<21:50, 3.84s/it] {'loss': 2.127, 'grad_norm': 0.0, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:20<21:50, 3.84s/it] 35%|███▍ | 180/520 [11:23<21:47, 3.84s/it] {'loss': 2.0928, 'grad_norm': 0.0, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:23<21:47, 3.84s/it] 35%|███▍ | 181/520 [11:27<21:41, 3.84s/it] {'loss': 1.924, 'grad_norm': 0.0, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:27<21:41, 3.84s/it] 35%|███▌ | 182/520 [11:31<21:37, 3.84s/it] {'loss': 2.173, 'grad_norm': 0.0, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:31<21:37, 3.84s/it] 35%|███▌ | 183/520 [11:35<21:34, 3.84s/it] {'loss': 2.0064, 'grad_norm': 0.0, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:35<21:34, 3.84s/it] 35%|███▌ | 184/520 [11:39<21:38, 3.87s/it] {'loss': 2.2563, 'grad_norm': 0.0, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:39<21:38, 3.87s/it] 36%|███▌ | 185/520 [11:43<21:33, 3.86s/it] {'loss': 2.0567, 'grad_norm': 0.0, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:43<21:33, 3.86s/it] 36%|███▌ | 186/520 [11:47<21:27, 3.85s/it] {'loss': 2.0963, 'grad_norm': 0.0, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:47<21:27, 3.85s/it] 36%|███▌ | 187/520 [11:50<21:22, 3.85s/it] {'loss': 2.2718, 'grad_norm': 0.0, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:50<21:22, 3.85s/it] 36%|███▌ | 188/520 [11:54<21:19, 3.85s/it] {'loss': 2.1637, 'grad_norm': 0.0, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:54<21:19, 3.85s/it] 36%|███▋ | 189/520 [11:58<21:16, 3.86s/it] {'loss': 2.0487, 'grad_norm': 0.0, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:58<21:16, 3.86s/it] 37%|███▋ | 190/520 [12:02<21:10, 3.85s/it] {'loss': 2.2378, 'grad_norm': 0.0, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:02<21:10, 3.85s/it] 37%|███▋ | 191/520 [12:06<21:11, 3.87s/it] {'loss': 2.2167, 'grad_norm': 0.0, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:06<21:11, 3.87s/it] 37%|███▋ | 192/520 [12:10<21:11, 3.88s/it] {'loss': 2.1261, 'grad_norm': 0.0, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:10<21:11, 3.88s/it] 37%|███▋ | 193/520 [12:14<21:06, 3.87s/it] {'loss': 1.9259, 'grad_norm': 0.0, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:14<21:06, 3.87s/it] 37%|███▋ | 194/520 [12:17<21:01, 3.87s/it] {'loss': 1.7848, 'grad_norm': 0.0, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:17<21:01, 3.87s/it] 38%|███▊ | 195/520 [12:21<20:35, 3.80s/it] {'loss': 1.9879, 'grad_norm': 0.0, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:21<20:35, 3.80s/it] 38%|███▊ | 196/520 [12:25<20:17, 3.76s/it] {'loss': 2.2013, 'grad_norm': 0.0, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:25<20:17, 3.76s/it] 38%|███▊ | 197/520 [12:28<20:04, 3.73s/it] {'loss': 2.0407, 'grad_norm': 0.0, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:28<20:04, 
3.73s/it] 38%|███▊ | 198/520 [12:32<19:53, 3.71s/it] {'loss': 2.1881, 'grad_norm': 0.0, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:32<19:53, 3.71s/it] 38%|███▊ | 199/520 [12:36<19:45, 3.69s/it] {'loss': 2.1723, 'grad_norm': 0.0, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:36<19:45, 3.69s/it] 38%|███▊ | 200/520 [12:39<19:41, 3.69s/it] {'loss': 1.8883, 'grad_norm': 0.0, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:39<19:41, 3.69s/it] 39%|███▊ | 201/520 [12:43<19:36, 3.69s/it] {'loss': 1.789, 'grad_norm': 0.0, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:43<19:36, 3.69s/it] 39%|███▉ | 202/520 [12:47<19:30, 3.68s/it] {'loss': 2.1073, 'grad_norm': 0.0, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:47<19:30, 3.68s/it] 39%|███▉ | 203/520 [12:51<19:35, 3.71s/it] {'loss': 2.1082, 'grad_norm': 0.0, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:51<19:35, 3.71s/it] 39%|███▉ | 204/520 [12:54<19:49, 3.76s/it] {'loss': 2.124, 'grad_norm': 0.0, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:54<19:49, 3.76s/it] 39%|███▉ | 205/520 [12:58<19:59, 3.81s/it] {'loss': 1.7868, 'grad_norm': 0.0, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:58<19:59, 3.81s/it] 40%|███▉ | 206/520 [13:02<20:03, 3.83s/it] {'loss': 2.0823, 'grad_norm': 0.0, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:02<20:03, 3.83s/it] 40%|███▉ | 207/520 [13:06<19:42, 3.78s/it] {'loss': 1.6807, 'grad_norm': 0.0, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:06<19:42, 3.78s/it] 40%|████ | 208/520 [13:10<19:26, 3.74s/it] {'loss': 2.3713, 'grad_norm': 0.0, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:10<19:26, 3.74s/it] 40%|████ | 209/520 [13:13<19:12, 3.70s/it] {'loss': 2.1462, 'grad_norm': 0.0, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:13<19:12, 3.70s/it] 40%|████ | 210/520 [13:17<19:01, 3.68s/it] {'loss': 2.0903, 'grad_norm': 0.0, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:17<19:01, 3.68s/it] 41%|████ | 211/520 [13:20<18:54, 3.67s/it] {'loss': 2.1076, 'grad_norm': 0.0, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:20<18:54, 3.67s/it] 41%|████ | 212/520 [13:24<18:48, 3.66s/it] {'loss': 1.9716, 'grad_norm': 0.0, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:24<18:48, 3.66s/it] 41%|████ | 213/520 [13:28<18:42, 3.66s/it] {'loss': 2.3066, 'grad_norm': 0.0, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:28<18:42, 3.66s/it] 41%|████ | 214/520 [13:31<18:40, 3.66s/it] {'loss': 2.1004, 'grad_norm': 0.0, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:31<18:40, 3.66s/it] 41%|████▏ | 215/520 [13:35<18:37, 3.66s/it] {'loss': 1.9269, 'grad_norm': 0.0, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:35<18:37, 3.66s/it] 42%|████▏ | 216/520 [13:39<18:34, 3.67s/it] {'loss': 2.091, 'grad_norm': 0.0, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:39<18:34, 3.67s/it] 42%|████▏ | 217/520 [13:42<18:31, 3.67s/it] {'loss': 2.13, 'grad_norm': 0.0, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:42<18:31, 3.67s/it] 42%|████▏ | 218/520 [13:46<18:28, 
3.67s/it] {'loss': 2.2033, 'grad_norm': 0.0, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:46<18:28, 3.67s/it] 42%|████▏ | 219/520 [13:50<18:23, 3.67s/it] {'loss': 2.0636, 'grad_norm': 0.0, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:50<18:23, 3.67s/it] 42%|████▏ | 220/520 [13:53<18:18, 3.66s/it] {'loss': 1.8837, 'grad_norm': 0.0, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:53<18:18, 3.66s/it] 42%|████▎ | 221/520 [13:57<18:13, 3.66s/it] {'loss': 2.1677, 'grad_norm': 0.0, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:57<18:13, 3.66s/it] 43%|████▎ | 222/520 [14:01<18:12, 3.67s/it] {'loss': 2.1221, 'grad_norm': 0.0, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:01<18:12, 3.67s/it] 43%|████▎ | 223/520 [14:04<18:09, 3.67s/it] {'loss': 2.061, 'grad_norm': 0.0, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:04<18:09, 3.67s/it] 43%|████▎ | 224/520 [14:08<18:04, 3.67s/it] {'loss': 1.6814, 'grad_norm': 0.0, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:08<18:04, 3.67s/it] 43%|████▎ | 225/520 [14:12<18:11, 3.70s/it] {'loss': 2.0363, 'grad_norm': 0.0, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:12<18:11, 3.70s/it] 43%|████▎ | 226/520 [14:16<18:16, 3.73s/it] {'loss': 2.1585, 'grad_norm': 0.0, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:16<18:16, 3.73s/it] 44%|████▎ | 227/520 [14:19<18:19, 3.75s/it] {'loss': 2.081, 'grad_norm': 0.0, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:19<18:19, 3.75s/it] 44%|████▍ | 228/520 [14:23<18:17, 3.76s/it] {'loss': 1.9309, 'grad_norm': 0.0, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:23<18:17, 3.76s/it] 44%|████▍ | 229/520 [14:27<18:16, 3.77s/it] {'loss': 1.963, 'grad_norm': 0.0, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:27<18:16, 3.77s/it] 44%|████▍ | 230/520 [14:31<18:13, 3.77s/it] {'loss': 2.1559, 'grad_norm': 0.0, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:31<18:13, 3.77s/it] 44%|████▍ | 231/520 [14:35<18:05, 3.75s/it] {'loss': 2.0662, 'grad_norm': 0.0, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:35<18:05, 3.75s/it] 45%|████▍ | 232/520 [14:38<17:51, 3.72s/it] {'loss': 1.9045, 'grad_norm': 0.0, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:38<17:51, 3.72s/it] 45%|████▍ | 233/520 [14:42<17:40, 3.70s/it] {'loss': 1.9718, 'grad_norm': 0.0, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:42<17:40, 3.70s/it] 45%|████▌ | 234/520 [14:46<17:37, 3.70s/it] {'loss': 2.1082, 'grad_norm': 0.0, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:46<17:37, 3.70s/it] 45%|████▌ | 235/520 [14:49<17:28, 3.68s/it] {'loss': 2.1515, 'grad_norm': 0.0, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:49<17:28, 3.68s/it] 45%|████▌ | 236/520 [14:53<17:17, 3.65s/it] {'loss': 2.0049, 'grad_norm': 0.0, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:53<17:17, 3.65s/it] 46%|████▌ | 237/520 [14:56<17:10, 3.64s/it] {'loss': 2.0493, 'grad_norm': 0.0, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:56<17:10, 3.64s/it] 46%|████▌ | 238/520 [15:00<17:05, 
3.64s/it] {'loss': 2.1513, 'grad_norm': 0.0, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:00<17:05, 3.64s/it] 46%|████▌ | 239/520 [15:04<17:04, 3.65s/it] {'loss': 2.1663, 'grad_norm': 0.0, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:04<17:04, 3.65s/it] 46%|████▌ | 240/520 [15:07<17:01, 3.65s/it] {'loss': 2.0831, 'grad_norm': 0.0, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:07<17:01, 3.65s/it] 46%|████▋ | 241/520 [15:11<17:00, 3.66s/it] {'loss': 2.1549, 'grad_norm': 0.0, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:11<17:00, 3.66s/it] 47%|████▋ | 242/520 [15:15<17:05, 3.69s/it] {'loss': 2.0452, 'grad_norm': 0.0, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:15<17:05, 3.69s/it] 47%|████▋ | 243/520 [15:18<16:58, 3.68s/it] {'loss': 2.0951, 'grad_norm': 0.0, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:18<16:58, 3.68s/it] 47%|████▋ | 244/520 [15:22<16:50, 3.66s/it] {'loss': 2.0286, 'grad_norm': 0.0, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:22<16:50, 3.66s/it] 47%|████▋ | 245/520 [15:26<16:45, 3.66s/it] {'loss': 2.1138, 'grad_norm': 0.0, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:26<16:45, 3.66s/it] 47%|████▋ | 246/520 [15:29<16:41, 3.65s/it] {'loss': 1.9508, 'grad_norm': 0.0, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:29<16:41, 3.65s/it] 48%|████▊ | 247/520 [15:33<16:35, 3.65s/it] {'loss': 2.1498, 'grad_norm': 0.0, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:33<16:35, 3.65s/it] 48%|████▊ | 248/520 [15:37<16:37, 3.67s/it] {'loss': 2.1007, 'grad_norm': 0.0, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:37<16:37, 3.67s/it] 48%|████▊ | 249/520 [15:40<16:36, 3.68s/it] {'loss': 2.118, 'grad_norm': 0.0, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:40<16:36, 3.68s/it] 48%|████▊ | 250/520 [15:44<16:30, 3.67s/it] {'loss': 2.2819, 'grad_norm': 0.0, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:44<16:30, 3.67s/it] 48%|████▊ | 251/520 [15:48<16:23, 3.65s/it] {'loss': 2.1132, 'grad_norm': 0.0, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:48<16:23, 3.65s/it] 48%|████▊ | 252/520 [15:51<16:22, 3.66s/it] {'loss': 1.8759, 'grad_norm': 0.0, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:51<16:22, 3.66s/it] 49%|████▊ | 253/520 [15:55<16:18, 3.66s/it] {'loss': 2.2258, 'grad_norm': 0.0, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:55<16:18, 3.66s/it] 49%|████▉ | 254/520 [15:59<16:14, 3.66s/it] {'loss': 2.059, 'grad_norm': 0.0, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:59<16:14, 3.66s/it] 49%|████▉ | 255/520 [16:02<16:08, 3.66s/it] {'loss': 2.1846, 'grad_norm': 0.0, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:02<16:08, 3.66s/it] 49%|████▉ | 256/520 [16:06<16:02, 3.64s/it] {'loss': 2.1549, 'grad_norm': 0.0, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:06<16:02, 3.64s/it] 49%|████▉ | 257/520 [16:10<15:57, 3.64s/it] {'loss': 2.1188, 'grad_norm': 0.0, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:10<15:57, 3.64s/it] 50%|████▉ | 258/520 
[16:13<15:56, 3.65s/it] {'loss': 2.0202, 'grad_norm': 0.0, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:13<15:56, 3.65s/it] 50%|████▉ | 259/520 [16:17<15:55, 3.66s/it] {'loss': 2.2765, 'grad_norm': 0.0, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:17<15:55, 3.66s/it] 50%|█████ | 260/520 [16:21<15:53, 3.67s/it] {'loss': 1.8251, 'grad_norm': 0.0, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:21<15:53, 3.67s/it] 50%|█████ | 261/520 [16:24<15:52, 3.68s/it] {'loss': 1.9131, 'grad_norm': 0.0, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:24<15:52, 3.68s/it] 50%|█████ | 262/520 [16:28<15:48, 3.67s/it] {'loss': 2.1102, 'grad_norm': 0.0, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:28<15:48, 3.67s/it] 51%|█████ | 263/520 [16:32<15:44, 3.67s/it] {'loss': 1.9227, 'grad_norm': 0.0, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:32<15:44, 3.67s/it] 51%|█████ | 264/520 [16:35<15:40, 3.68s/it] {'loss': 2.1143, 'grad_norm': 0.0, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:35<15:40, 3.68s/it] 51%|█████ | 265/520 [16:39<15:39, 3.68s/it] {'loss': 2.276, 'grad_norm': 0.0, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:39<15:39, 3.68s/it] 51%|█████ | 266/520 [16:43<15:32, 3.67s/it] {'loss': 1.9522, 'grad_norm': 0.0, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:43<15:32, 3.67s/it] 51%|█████▏ | 267/520 [16:46<15:29, 3.67s/it] {'loss': 2.0106, 'grad_norm': 0.0, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:46<15:29, 3.67s/it] 52%|█████▏ | 268/520 [16:50<15:26, 3.68s/it] {'loss': 2.0169, 'grad_norm': 0.0, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:50<15:26, 3.68s/it] 52%|█████▏ | 269/520 [16:54<15:29, 3.70s/it] {'loss': 2.2081, 'grad_norm': 0.0, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:54<15:29, 3.70s/it] 52%|█████▏ | 270/520 [16:57<15:28, 3.72s/it] {'loss': 1.872, 'grad_norm': 0.0, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:57<15:28, 3.72s/it] 52%|█████▏ | 271/520 [17:01<15:29, 3.73s/it] {'loss': 2.1221, 'grad_norm': 0.0, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:01<15:29, 3.73s/it] 52%|█████▏ | 272/520 [17:05<15:25, 3.73s/it] {'loss': 1.8539, 'grad_norm': 0.0, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:05<15:25, 3.73s/it] 52%|█████▎ | 273/520 [17:09<15:23, 3.74s/it] {'loss': 1.8384, 'grad_norm': 0.0, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:09<15:23, 3.74s/it] 53%|█████▎ | 274/520 [17:12<15:20, 3.74s/it] {'loss': 2.1584, 'grad_norm': 0.0, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:12<15:20, 3.74s/it] 53%|█████▎ | 275/520 [17:16<15:17, 3.75s/it] {'loss': 2.1887, 'grad_norm': 0.0, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:16<15:17, 3.75s/it] 53%|█████▎ | 276/520 [17:20<15:16, 3.76s/it] {'loss': 2.1666, 'grad_norm': 0.0, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:20<15:16, 3.76s/it] 53%|█████▎ | 277/520 [17:24<15:12, 3.75s/it] {'loss': 1.8919, 'grad_norm': 0.0, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:24<15:12, 3.75s/it] 53%|█████▎ | 
278/520 [17:28<15:08, 3.75s/it] {'loss': 2.0114, 'grad_norm': 0.0, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:28<15:08, 3.75s/it] 54%|█████▎ | 279/520 [17:31<15:06, 3.76s/it] {'loss': 2.005, 'grad_norm': 0.0, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:31<15:06, 3.76s/it] 54%|█████▍ | 280/520 [17:35<15:04, 3.77s/it] {'loss': 2.1648, 'grad_norm': 0.0, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:35<15:04, 3.77s/it] 54%|█████▍ | 281/520 [17:39<14:59, 3.76s/it] {'loss': 2.2198, 'grad_norm': 0.0, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:39<14:59, 3.76s/it] 54%|█████▍ | 282/520 [17:43<14:54, 3.76s/it] {'loss': 2.0525, 'grad_norm': 0.0, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:43<14:54, 3.76s/it] 54%|█████▍ | 283/520 [17:46<14:46, 3.74s/it] {'loss': 2.189, 'grad_norm': 0.0, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:46<14:46, 3.74s/it] 55%|█████▍ | 284/520 [17:50<14:37, 3.72s/it] {'loss': 1.9636, 'grad_norm': 0.0, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:50<14:37, 3.72s/it] 55%|█████▍ | 285/520 [17:54<14:27, 3.69s/it] {'loss': 2.0822, 'grad_norm': 0.0, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:54<14:27, 3.69s/it] 55%|█████▌ | 286/520 [17:57<14:20, 3.68s/it] {'loss': 2.1565, 'grad_norm': 0.0, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:57<14:20, 3.68s/it] 55%|█████▌ | 287/520 [18:01<14:19, 3.69s/it] {'loss': 2.2603, 'grad_norm': 0.0, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:01<14:19, 3.69s/it] 55%|█████▌ | 288/520 [18:05<14:16, 3.69s/it] {'loss': 2.0485, 'grad_norm': 0.0, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:05<14:16, 3.69s/it] 56%|█████▌ | 289/520 [18:08<14:09, 3.68s/it] {'loss': 2.2023, 'grad_norm': 0.0, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:08<14:09, 3.68s/it] 56%|█████▌ | 290/520 [18:12<14:05, 3.67s/it] {'loss': 1.914, 'grad_norm': 0.0, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:12<14:05, 3.67s/it] 56%|█████▌ | 291/520 [18:16<14:01, 3.68s/it] {'loss': 2.0878, 'grad_norm': 0.0, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:16<14:01, 3.68s/it] 56%|█████▌ | 292/520 [18:19<13:56, 3.67s/it] {'loss': 2.1345, 'grad_norm': 0.0, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:19<13:56, 3.67s/it] 56%|█████▋ | 293/520 [18:23<13:50, 3.66s/it] {'loss': 2.1425, 'grad_norm': 0.0, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:23<13:50, 3.66s/it] 57%|█████▋ | 294/520 [18:27<13:46, 3.66s/it] {'loss': 2.2303, 'grad_norm': 0.0, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:27<13:46, 3.66s/it] 57%|█████▋ | 295/520 [18:30<13:44, 3.66s/it] {'loss': 1.832, 'grad_norm': 0.0, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:30<13:44, 3.66s/it] 57%|█████▋ | 296/520 [18:34<13:41, 3.67s/it] {'loss': 2.0787, 'grad_norm': 0.0, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:34<13:41, 3.67s/it] 57%|█████▋ | 297/520 [18:38<13:34, 3.65s/it] {'loss': 2.1771, 'grad_norm': 0.0, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 
297/520 [18:38<13:34, 3.65s/it] 57%|█████▋ | 298/520 [18:41<13:31, 3.65s/it] {'loss': 1.9442, 'grad_norm': 0.0, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:41<13:31, 3.65s/it] 57%|█████▊ | 299/520 [18:45<13:27, 3.66s/it] {'loss': 1.8794, 'grad_norm': 0.0, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:45<13:27, 3.66s/it] 58%|█████▊ | 300/520 [18:49<13:23, 3.65s/it] {'loss': 2.1153, 'grad_norm': 0.0, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:49<13:23, 3.65s/it] 58%|█████▊ | 301/520 [18:52<13:19, 3.65s/it] {'loss': 2.0707, 'grad_norm': 0.0, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:52<13:19, 3.65s/it] 58%|█████▊ | 302/520 [18:56<13:16, 3.65s/it] {'loss': 1.8532, 'grad_norm': 0.0, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:56<13:16, 3.65s/it] 58%|█████▊ | 303/520 [18:59<13:11, 3.65s/it] {'loss': 2.1844, 'grad_norm': 0.0, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [18:59<13:11, 3.65s/it] 58%|█████▊ | 304/520 [19:03<13:09, 3.65s/it] {'loss': 2.081, 'grad_norm': 0.0, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:03<13:09, 3.65s/it] 59%|█████▊ | 305/520 [19:07<13:04, 3.65s/it] {'loss': 2.1651, 'grad_norm': 0.0, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:07<13:04, 3.65s/it] 59%|█████▉ | 306/520 [19:10<13:01, 3.65s/it] {'loss': 2.1107, 'grad_norm': 0.0, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:10<13:01, 3.65s/it] 59%|█████▉ | 307/520 [19:14<13:16, 3.74s/it] {'loss': 2.0262, 'grad_norm': 0.0, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:14<13:16, 3.74s/it] 59%|█████▉ | 308/520 [19:18<13:07, 3.71s/it] {'loss': 2.0165, 'grad_norm': 0.0, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:18<13:07, 3.71s/it] 59%|█████▉ | 309/520 [19:22<12:59, 3.69s/it] {'loss': 1.9269, 'grad_norm': 0.0, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:22<12:59, 3.69s/it] 60%|█████▉ | 310/520 [19:25<12:54, 3.69s/it] {'loss': 1.9954, 'grad_norm': 0.0, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:25<12:54, 3.69s/it] 60%|█████▉ | 311/520 [19:29<12:48, 3.68s/it] {'loss': 2.065, 'grad_norm': 0.0, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:29<12:48, 3.68s/it] 60%|██████ | 312/520 [19:33<12:46, 3.68s/it] {'loss': 2.1635, 'grad_norm': 0.0, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:33<12:46, 3.68s/it] 60%|██████ | 313/520 [19:36<12:42, 3.68s/it] {'loss': 1.8959, 'grad_norm': 0.0, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:36<12:42, 3.68s/it] 60%|██████ | 314/520 [19:41<13:09, 3.83s/it] {'loss': 2.0684, 'grad_norm': 0.0, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:41<13:09, 3.83s/it] 61%|██████ | 315/520 [19:44<13:02, 3.82s/it] {'loss': 2.0994, 'grad_norm': 0.0, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:44<13:02, 3.82s/it] 61%|██████ | 316/520 [19:49<13:24, 3.94s/it] {'loss': 2.1863, 'grad_norm': 0.0, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:49<13:24, 3.94s/it] 61%|██████ | 317/520 [19:52<13:13, 3.91s/it] {'loss': 1.9533, 'grad_norm': 0.0, 'learning_rate': 
+ 318/520 [19:56<12:56, 3.85s/it] {'loss': 2.2686, 'grad_norm': 0.0, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 319/520 [20:00<13:08, 3.92s/it] {'loss': 1.8863, 'grad_norm': 0.0, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 320/520 [20:04<12:49, 3.85s/it] {'loss': 2.0865, 'grad_norm': 0.0, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 321/520 [20:08<12:37, 3.80s/it] {'loss': 2.0712, 'grad_norm': 0.0, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 322/520 [20:11<12:28, 3.78s/it] {'loss': 1.8969, 'grad_norm': 0.0, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 323/520 [20:15<12:19, 3.75s/it] {'loss': 2.0202, 'grad_norm': 0.0, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 324/520 [20:19<12:11, 3.73s/it] {'loss': 2.0551, 'grad_norm': 0.0, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 325/520 [20:22<12:06, 3.73s/it] {'loss': 2.1566, 'grad_norm': 0.0, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 326/520 [20:26<12:00, 3.71s/it] {'loss': 2.1909, 'grad_norm': 0.0, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 327/520 [20:30<11:54, 3.70s/it] {'loss': 2.061, 'grad_norm': 0.0, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 328/520 [20:33<11:48, 3.69s/it] {'loss': 2.1111, 'grad_norm': 0.0, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 329/520 [20:37<11:44, 3.69s/it] {'loss': 1.9436, 'grad_norm': 0.0, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 330/520 [20:41<11:43, 3.70s/it] {'loss': 2.1281, 'grad_norm': 0.0, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 331/520 [20:44<11:37, 3.69s/it] {'loss': 2.1551, 'grad_norm': 0.0, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 332/520 [20:48<11:33, 3.69s/it] {'loss': 1.8491, 'grad_norm': 0.0, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 333/520 [20:52<11:29, 3.69s/it] {'loss': 2.1184, 'grad_norm': 0.0, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 334/520 [20:56<11:25, 3.69s/it] {'loss': 2.1157, 'grad_norm': 0.0, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 335/520 [20:59<11:21, 3.68s/it] {'loss': 2.013, 'grad_norm': 0.0, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 336/520 [21:03<11:18, 3.69s/it] {'loss': 2.1874, 'grad_norm': 0.0, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 337/520 [21:07<11:15, 3.69s/it] {'loss': 2.2477, 'grad_norm': 0.0, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 338/520 [21:10<11:12, 3.69s/it] {'loss': 2.1774, 'grad_norm': 0.0, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 339/520 [21:14<11:09, 3.70s/it] {'loss': 2.126, 'grad_norm': 0.0, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 340/520 [21:18<11:10, 3.72s/it] {'loss': 2.0845, 'grad_norm': 0.0, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 341/520 [21:21<11:02, 3.70s/it] {'loss': 2.094, 'grad_norm': 0.0, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 342/520 [21:25<10:57, 3.69s/it] {'loss': 2.0199, 'grad_norm': 0.0, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 343/520 [21:29<10:56, 3.71s/it] {'loss': 1.7182, 'grad_norm': 0.0, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 344/520 [21:33<10:55, 3.72s/it] {'loss': 2.1244, 'grad_norm': 0.0, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
+ 345/520 [21:36<10:48, 3.71s/it] {'loss': 2.2588, 'grad_norm': 0.0, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 346/520 [21:40<10:44, 3.70s/it] {'loss': 1.859, 'grad_norm': 0.0, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 347/520 [21:44<10:39, 3.70s/it] {'loss': 1.9277, 'grad_norm': 0.0, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 348/520 [21:47<10:35, 3.69s/it] {'loss': 2.405, 'grad_norm': 0.0, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 349/520 [21:51<10:31, 3.69s/it] {'loss': 2.2236, 'grad_norm': 0.0, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 350/520 [21:55<10:27, 3.69s/it] {'loss': 2.1184, 'grad_norm': 0.0, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 351/520 [21:58<10:25, 3.70s/it] {'loss': 2.0414, 'grad_norm': 0.0, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 352/520 [22:02<10:27, 3.74s/it] {'loss': 2.0824, 'grad_norm': 0.0, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 353/520 [22:06<10:43, 3.85s/it] {'loss': 1.8115, 'grad_norm': 0.0, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 354/520 [22:11<10:55, 3.95s/it] {'loss': 1.8797, 'grad_norm': 0.0, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 355/520 [22:14<10:44, 3.90s/it] {'loss': 2.0561, 'grad_norm': 0.0, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 356/520 [22:18<10:36, 3.88s/it] {'loss': 2.2531, 'grad_norm': 0.0, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 357/520 [22:22<10:32, 3.88s/it] {'loss': 2.0294, 'grad_norm': 0.0, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 358/520 [22:26<10:38, 3.94s/it] {'loss': 2.0531, 'grad_norm': 0.0, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 359/520 [22:30<10:42, 3.99s/it] {'loss': 2.011, 'grad_norm': 0.0, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 360/520 [22:34<10:42, 4.02s/it] {'loss': 1.9948, 'grad_norm': 0.0, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 361/520 [22:38<10:36, 4.00s/it] {'loss': 1.7504, 'grad_norm': 0.0, 'learning_rate': 0.04522281607821288, 'epoch': 0.69}
+ 362/520 [22:42<10:25, 3.96s/it] {'loss': 2.2058, 'grad_norm': 0.0, 'learning_rate': 0.04470238686320606, 'epoch': 0.7}
+ 363/520 [22:46<10:15, 3.92s/it] {'loss': 2.0753, 'grad_norm': 0.0, 'learning_rate': 0.044184106189231624, 'epoch': 0.7}
+ 364/520 [22:50<10:10, 3.91s/it] {'loss': 1.9811, 'grad_norm': 0.0, 'learning_rate': 0.043667994193637795, 'epoch': 0.7}
+ 365/520 [22:54<10:05, 3.91s/it] {'loss': 2.1137, 'grad_norm': 0.0, 'learning_rate': 0.043154070929510784, 'epoch': 0.7}
+ 366/520 [22:58<10:12, 3.98s/it] {'loss': 2.1027, 'grad_norm': 0.0, 'learning_rate': 0.04264235636489542, 'epoch': 0.7}
+ 367/520 [23:02<10:09, 3.98s/it] {'loss': 2.1701, 'grad_norm': 0.0, 'learning_rate': 0.04213287038201943, 'epoch': 0.71}
+ 368/520 [23:06<10:00, 3.95s/it] {'loss': 2.1175, 'grad_norm': 0.0, 'learning_rate': 0.04162563277652104, 'epoch': 0.71}
+ 369/520 [23:10<09:53, 3.93s/it] {'loss': 1.7789, 'grad_norm': 0.0, 'learning_rate': 0.04112066325667954, 'epoch': 0.71}
+ 370/520 [23:14<09:45, 3.90s/it] {'loss': 2.0015, 'grad_norm': 0.0, 'learning_rate': 0.04061798144264986, 'epoch': 0.71}
+ 371/520 [23:17<09:38, 3.88s/it] {'loss': 2.1704, 'grad_norm': 0.0, 'learning_rate': 0.04011760686569998, 'epoch': 0.71}
+ 372/520 [23:21<09:37, 3.90s/it] {'loss': 1.8294, 'grad_norm': 0.0, 'learning_rate': 0.03961955896745224, 'epoch': 0.72}
+ 373/520 [23:25<09:30, 3.88s/it] {'loss': 2.0181, 'grad_norm': 0.0, 'learning_rate': 0.03912385709912794, 'epoch': 0.72}
+ 374/520 [23:29<09:23, 3.86s/it] {'loss': 2.1018, 'grad_norm': 0.0, 'learning_rate': 0.038630520520795276, 'epoch': 0.72}
+ 375/520 [23:33<09:17, 3.84s/it] {'loss': 2.1132, 'grad_norm': 0.0, 'learning_rate': 0.03813956840062119, 'epoch': 0.72}
+ 376/520 [23:37<09:11, 3.83s/it] {'loss': 2.0573, 'grad_norm': 0.0, 'learning_rate': 0.037651019814126656, 'epoch': 0.72}
+ 377/520 [23:40<09:06, 3.82s/it] {'loss': 2.0899, 'grad_norm': 0.0, 'learning_rate': 0.037164893743445275, 'epoch': 0.72}
+ 378/520 [23:44<09:01, 3.81s/it] {'loss': 2.0289, 'grad_norm': 0.0, 'learning_rate': 0.03668120907658603, 'epoch': 0.73}
+ 379/520 [23:48<08:58, 3.82s/it] {'loss': 1.9774, 'grad_norm': 0.0, 'learning_rate': 0.036199984606699154, 'epoch': 0.73}
+ 380/520 [23:52<08:54, 3.82s/it] {'loss': 1.8319, 'grad_norm': 0.0, 'learning_rate': 0.035721239031346066, 'epoch': 0.73}
+ 381/520 [23:56<08:49, 3.81s/it] {'loss': 2.0371, 'grad_norm': 0.0, 'learning_rate': 0.03524499095177297, 'epoch': 0.73}
+ 382/520 [23:59<08:46, 3.82s/it] {'loss': 1.9153, 'grad_norm': 0.0, 'learning_rate': 0.03477125887218792, 'epoch': 0.73}
+ 383/520 [24:03<08:42, 3.81s/it] {'loss': 2.2443, 'grad_norm': 0.0, 'learning_rate': 0.03430006119904196, 'epoch': 0.74}
+ 384/520 [24:07<08:37, 3.80s/it] {'loss': 1.6572, 'grad_norm': 0.0, 'learning_rate': 0.033831416240314084, 'epoch': 0.74}
+ 385/520 [24:11<08:26, 3.75s/it] {'loss': 1.9484, 'grad_norm': 0.0, 'learning_rate': 0.03336534220479961, 'epoch': 0.74}
+ 386/520 [24:14<08:17, 3.72s/it] {'loss': 2.0001, 'grad_norm': 0.0, 'learning_rate': 0.032901857201403005, 'epoch': 0.74}
+ 387/520 [24:18<08:12, 3.70s/it] {'loss': 1.7967, 'grad_norm': 0.0, 'learning_rate': 0.032440979238433976, 'epoch': 0.74}
+ 388/520 [24:22<08:06, 3.68s/it] {'loss': 2.1252, 'grad_norm': 0.0, 'learning_rate': 0.03198272622290804, 'epoch': 0.75}
+ 389/520 [24:25<08:00, 3.67s/it] {'loss': 2.2819, 'grad_norm': 0.0, 'learning_rate': 0.03152711595985065, 'epoch': 0.75}
+ 390/520 [24:29<07:55, 3.66s/it] {'loss': 2.0794, 'grad_norm': 0.0, 'learning_rate': 0.031074166151605298, 'epoch': 0.75}
+ 391/520 [24:33<07:53, 3.67s/it] {'loss': 2.0751, 'grad_norm': 0.0, 'learning_rate': 0.030623894397145836, 'epoch': 0.75}
+ 392/520 [24:36<07:49, 3.67s/it] {'loss': 2.0834, 'grad_norm': 0.0, 'learning_rate': 0.03017631819139273, 'epoch': 0.75}
+ 393/520 [24:40<07:44, 3.66s/it] {'loss': 1.6935, 'grad_norm': 0.0, 'learning_rate': 0.029731454924533086, 'epoch': 0.76}
+ 394/520 [24:43<07:39, 3.65s/it] {'loss': 2.1218, 'grad_norm': 0.0, 'learning_rate': 0.029289321881345254, 'epoch': 0.76}
+ 395/520 [24:47<07:35, 3.65s/it] {'loss': 2.1493, 'grad_norm': 0.0, 'learning_rate': 0.028849936240527008, 'epoch': 0.76}
+ 396/520 [24:51<07:31, 3.64s/it] {'loss': 2.0961, 'grad_norm': 0.0, 'learning_rate': 0.028413315074028157, 'epoch': 0.76}
+ 397/520 [24:54<07:28, 3.65s/it] {'loss': 2.0472, 'grad_norm': 0.0, 'learning_rate': 0.027979475346387363, 'epoch': 0.76}
+ 398/520 [24:58<07:25, 3.65s/it] {'loss': 2.2098, 'grad_norm': 0.0, 'learning_rate': 0.027548433914072735, 'epoch': 0.77}
+ 399/520 [25:02<07:24, 3.68s/it] {'loss': 1.8453, 'grad_norm': 0.0, 'learning_rate': 0.027120207524827168, 'epoch': 0.77}
+ 400/520 [25:05<07:22, 3.69s/it] {'loss': 1.8965, 'grad_norm': 0.0, 'learning_rate': 0.02669481281701739, 'epoch': 0.77}
+ 401/520 [25:09<07:18, 3.69s/it] {'loss': 2.0165, 'grad_norm': 0.0, 'learning_rate': 0.026272266318987603, 'epoch': 0.77}
+ 402/520 [25:13<07:14, 3.68s/it] {'loss': 2.1351, 'grad_norm': 0.0, 'learning_rate': 0.02585258444841733, 'epoch': 0.77}
+ 403/520 [25:17<07:10, 3.68s/it] {'loss': 2.1219, 'grad_norm': 0.0, 'learning_rate': 0.025435783511683442, 'epoch': 0.78}
+ 404/520 [25:20<07:05, 3.67s/it] {'loss': 2.2818, 'grad_norm': 0.0, 'learning_rate': 0.02502187970322657, 'epoch': 0.78}
+ 405/520 [25:24<07:04, 3.69s/it] {'loss': 1.875, 'grad_norm': 0.0, 'learning_rate': 0.02461088910492202, 'epoch': 0.78}
+ 406/520 [25:28<07:01, 3.70s/it] {'loss': 2.1806, 'grad_norm': 0.0, 'learning_rate': 0.02420282768545469, 'epoch': 0.78}
+ 407/520 [25:31<06:58, 3.70s/it] {'loss': 2.0986, 'grad_norm': 0.0, 'learning_rate': 0.02379771129969892, 'epoch': 0.78}
+ 408/520 [25:35<06:53, 3.69s/it] {'loss': 2.1517, 'grad_norm': 0.0, 'learning_rate': 0.023395555688102213, 'epoch': 0.78}
+ 409/520 [25:39<06:49, 3.69s/it] {'loss': 2.2385, 'grad_norm': 0.0, 'learning_rate': 0.02299637647607372, 'epoch': 0.79}
+ 410/520 [25:42<06:46, 3.69s/it] {'loss': 2.1727, 'grad_norm': 0.0, 'learning_rate': 0.022600189173377264, 'epoch': 0.79}
+ 411/520 [25:46<06:42, 3.69s/it] {'loss': 2.195, 'grad_norm': 0.0, 'learning_rate': 0.022207009173528525, 'epoch': 0.79}
+ 412/520 [25:50<06:39, 3.70s/it] {'loss': 2.0965, 'grad_norm': 0.0, 'learning_rate': 0.02181685175319702, 'epoch': 0.79}
+ 413/520 [25:54<06:36, 3.70s/it] {'loss': 1.916, 'grad_norm': 0.0, 'learning_rate': 0.021429732071612653, 'epoch': 0.79}
+ 414/520 [25:57<06:33, 3.72s/it] {'loss': 1.757, 'grad_norm': 0.0, 'learning_rate': 0.02104566516997647, 'epoch': 0.8}
+ 415/520 [26:01<06:30, 3.72s/it] {'loss': 2.0894, 'grad_norm': 0.0, 'learning_rate': 0.020664665970876496, 'epoch': 0.8}
+ 416/520 [26:05<06:25, 3.71s/it] {'loss': 2.3404, 'grad_norm': 0.0, 'learning_rate': 0.020286749277707784, 'epoch': 0.8}
+ 417/520 [26:08<06:21, 3.71s/it] {'loss': 2.0376, 'grad_norm': 0.0, 'learning_rate': 0.019911929774097215, 'epoch': 0.8}
+ 418/520 [26:12<06:19, 3.73s/it] {'loss': 1.9876, 'grad_norm': 0.0, 'learning_rate': 0.019540222023333165, 'epoch': 0.8}
+ 419/520 [26:16<06:19, 3.76s/it] {'loss': 2.2701, 'grad_norm': 0.0, 'learning_rate': 0.01917164046779948, 'epoch': 0.81}
+ 420/520 [26:20<06:19, 3.80s/it] {'loss': 2.1783, 'grad_norm': 0.0, 'learning_rate': 0.018806199428414352, 'epoch': 0.81}
+ 421/520 [26:24<06:17, 3.82s/it] {'loss': 2.3788, 'grad_norm': 0.0, 'learning_rate': 0.018443913104073985, 'epoch': 0.81}
+ 422/520 [26:28<06:15, 3.83s/it] {'loss': 2.1751, 'grad_norm': 0.0, 'learning_rate': 0.01808479557110081, 'epoch': 0.81}
+ 423/520 [26:32<06:14, 3.86s/it] {'loss': 2.3239, 'grad_norm': 0.0, 'learning_rate': 0.017728860782696667, 'epoch': 0.81}
+ 424/520 [26:35<06:11, 3.87s/it] {'loss': 1.8431, 'grad_norm': 0.0, 'learning_rate': 0.017376122568400532, 'epoch': 0.82}
+ 425/520 [26:39<06:07, 3.87s/it] {'loss': 2.0338, 'grad_norm': 0.0, 'learning_rate': 0.017026594633551252, 'epoch': 0.82}
+ 426/520 [26:43<06:03, 3.86s/it] {'loss': 2.2809, 'grad_norm': 0.0, 'learning_rate': 0.01668029055875512, 'epoch': 0.82}
+ 427/520 [26:47<05:58, 3.86s/it] {'loss': 1.9615, 'grad_norm': 0.0, 'learning_rate': 0.016337223799358026, 'epoch': 0.82}
+ 428/520 [26:51<05:54, 3.86s/it] {'loss': 2.179, 'grad_norm': 0.0, 'learning_rate': 0.01599740768492286, 'epoch': 0.82}
+ 429/520 [26:55<05:51, 3.86s/it] {'loss': 2.1882, 'grad_norm': 0.0, 'learning_rate': 0.015660855418711452, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 430/520 [26:59<05:46, 3.86s/it] {'loss': 2.0206, 'grad_norm': 0.0, 'learning_rate': 0.015327580077171589, 'epoch': 0.83}
+ 431/520 [27:02<05:42, 3.85s/it] {'loss': 1.8737, 'grad_norm': 0.0, 'learning_rate': 0.014997594609429088, 'epoch': 0.83}
+ 432/520 [27:06<05:39, 3.86s/it] {'loss': 2.0893, 'grad_norm': 0.0, 'learning_rate': 0.01467091183678444, 'epoch': 0.83}
+ 433/520 [27:10<05:35, 3.86s/it] {'loss': 2.1446, 'grad_norm': 0.0, 'learning_rate': 0.014347544452214867, 'epoch': 0.83}
+ 434/520 [27:14<05:27, 3.81s/it] {'loss': 2.162, 'grad_norm': 0.0, 'learning_rate': 0.014027505019880971, 'epoch': 0.83}
+ 435/520 [27:17<05:20, 3.77s/it] {'loss': 2.1714, 'grad_norm': 0.0, 'learning_rate': 0.013710805974638696, 'epoch': 0.84}
+ 436/520 [27:21<05:14, 3.74s/it] {'loss': 2.1073, 'grad_norm': 0.0, 'learning_rate': 0.01339745962155613, 'epoch': 0.84}
+ 437/520 [27:25<05:10, 3.74s/it] {'loss': 2.1399, 'grad_norm': 0.0, 'learning_rate': 0.01308747813543536, 'epoch': 0.84}
+ 438/520 [27:29<05:05, 3.73s/it] {'loss': 2.1034, 'grad_norm': 0.0, 'learning_rate': 0.012780873560339467, 'epoch': 0.84}
+ 439/520 [27:32<05:00, 3.71s/it] {'loss': 1.7664, 'grad_norm': 0.0, 'learning_rate': 0.012477657809124632, 'epoch': 0.84}
+ 440/520 [27:36<04:55, 3.69s/it] {'loss': 2.0058, 'grad_norm': 0.0, 'learning_rate': 0.012177842662977134, 'epoch': 0.85}
+ 441/520 [27:40<04:51, 3.69s/it] {'loss': 1.8248, 'grad_norm': 0.0, 'learning_rate': 0.01188143977095576, 'epoch': 0.85}
+ 442/520 [27:43<04:48, 3.70s/it] {'loss': 2.3179, 'grad_norm': 0.0, 'learning_rate': 0.011588460649539035, 'epoch': 0.85}
+ 443/520 [27:47<04:43, 3.69s/it] {'loss': 2.0141, 'grad_norm': 0.0, 'learning_rate': 0.011298916682177829, 'epoch': 0.85}
+ 444/520 [27:51<04:40, 3.69s/it] {'loss': 1.9937, 'grad_norm': 0.0, 'learning_rate': 0.011012819118853146, 'epoch': 0.85}
+ 445/520 [27:54<04:36, 3.69s/it] {'loss': 1.9637, 'grad_norm': 0.0, 'learning_rate': 0.01073017907563887, 'epoch': 0.86}
+ 446/520 [27:58<04:32, 3.68s/it] {'loss': 1.8401, 'grad_norm': 0.0, 'learning_rate': 0.010451007534269908, 'epoch': 0.86}
+ 447/520 [28:02<04:28, 3.68s/it] {'loss': 2.1475, 'grad_norm': 0.0, 'learning_rate': 0.010175315341715598, 'epoch': 0.86}
+ 448/520 [28:05<04:24, 3.67s/it] {'loss': 2.0884, 'grad_norm': 0.0, 'learning_rate': 0.009903113209758098, 'epoch': 0.86}
+ 449/520 [28:09<04:20, 3.67s/it] {'loss': 1.9783, 'grad_norm': 0.0, 'learning_rate': 0.009634411714576352, 'epoch': 0.86}
+ 450/520 [28:13<04:17, 3.67s/it] {'loss': 2.1244, 'grad_norm': 0.0, 'learning_rate': 0.009369221296335007, 'epoch': 0.87}
+ 451/520 [28:16<04:13, 3.67s/it] {'loss': 2.1608, 'grad_norm': 0.0, 'learning_rate': 0.009107552258778906, 'epoch': 0.87}
+ 452/520 [28:20<04:09, 3.67s/it] {'loss': 1.8367, 'grad_norm': 0.0, 'learning_rate': 0.008849414768832687, 'epoch': 0.87}
+ 453/520 [28:24<04:06, 3.67s/it] {'loss': 1.9767, 'grad_norm': 0.0, 'learning_rate': 0.008594818856205699, 'epoch': 0.87}
+ 454/520 [28:27<04:02, 3.67s/it] {'loss': 2.0911, 'grad_norm': 0.0, 'learning_rate': 0.00834377441300238, 'epoch': 0.87}
+ 455/520 [28:31<03:58, 3.66s/it] {'loss': 2.0563, 'grad_norm': 0.0, 'learning_rate': 0.008096291193337934, 'epoch': 0.88}
+ 456/520 [28:35<03:54, 3.66s/it] {'loss': 2.0794, 'grad_norm': 0.0, 'learning_rate': 0.007852378812959226, 'epoch': 0.88}
+ 457/520 [28:38<03:51, 3.67s/it] {'loss': 1.7164, 'grad_norm': 0.0, 'learning_rate': 0.007612046748871327, 'epoch': 0.88}
+ 458/520 [28:42<03:47, 3.67s/it] {'loss': 2.2316, 'grad_norm': 0.0, 'learning_rate': 0.007375304338969136, 'epoch': 0.88}
+ 459/520 [28:46<03:43, 3.67s/it] {'loss': 2.0823, 'grad_norm': 0.0, 'learning_rate': 0.007142160781674645, 'epoch': 0.88}
+ 460/520 [28:49<03:40, 3.67s/it] {'loss': 2.1604, 'grad_norm': 0.0, 'learning_rate': 0.006912625135579587, 'epoch': 0.88}
+ 461/520 [28:53<03:37, 3.68s/it] {'loss': 1.499, 'grad_norm': 0.0, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89}
+ 462/520 [28:57<03:32, 3.67s/it] {'loss': 1.9033, 'grad_norm': 0.0, 'learning_rate': 0.006464413110096601, 'epoch': 0.89}
+ 463/520 [29:00<03:28, 3.67s/it] {'loss': 2.3309, 'grad_norm': 0.0, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 464/520 [29:04<03:24, 3.66s/it] {'loss': 2.1128, 'grad_norm': 0.0, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 465/520 [29:08<03:21, 3.66s/it] {'loss': 2.1251, 'grad_norm': 0.0, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 466/520 [29:11<03:17, 3.66s/it] {'loss': 1.9657, 'grad_norm': 0.0, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 467/520 [29:15<03:14, 3.66s/it] {'loss': 1.8888, 'grad_norm': 0.0, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 468/520 [29:19<03:10, 3.67s/it] {'loss': 2.2701, 'grad_norm': 0.0, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 469/520 [29:22<03:06, 3.67s/it] {'loss': 2.1097, 'grad_norm': 0.0, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 470/520 [29:26<03:03, 3.66s/it] {'loss': 2.0168, 'grad_norm': 0.0, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 471/520 [29:30<02:59, 3.66s/it] {'loss': 2.2295, 'grad_norm': 0.0, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 472/520 [29:33<02:56, 3.68s/it] {'loss': 2.1844, 'grad_norm': 0.0, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 473/520 [29:37<02:52, 3.67s/it] {'loss': 2.2132, 'grad_norm': 0.0, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 474/520 [29:41<02:48, 3.67s/it] {'loss': 1.9064, 'grad_norm': 0.0, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 475/520 [29:44<02:45, 3.67s/it] {'loss': 1.8533, 'grad_norm': 0.0, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 476/520 [29:48<02:41, 3.66s/it] {'loss': 2.165, 'grad_norm': 0.0, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 477/520 [29:52<02:37, 3.67s/it] {'loss': 2.156, 'grad_norm': 0.0, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 478/520 [29:55<02:34, 3.67s/it] {'loss': 2.069, 'grad_norm': 0.0, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 479/520 [29:59<02:30, 3.67s/it] {'loss': 1.9471, 'grad_norm': 0.0, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 480/520 [30:03<02:27, 3.69s/it] {'loss': 1.9655, 'grad_norm': 0.0, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 481/520 [30:07<02:23, 3.69s/it] {'loss': 1.8537, 'grad_norm': 0.0, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 482/520 [30:10<02:20, 3.69s/it] {'loss': 1.8943, 'grad_norm': 0.0, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 483/520 [30:14<02:16, 3.68s/it] {'loss': 2.0377, 'grad_norm': 0.0, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 484/520 [30:18<02:13, 3.71s/it] {'loss': 2.1901, 'grad_norm': 0.0, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 485/520 [30:22<02:11, 3.76s/it] {'loss': 2.0144, 'grad_norm': 0.0, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 486/520 [30:25<02:08, 3.78s/it] {'loss': 2.0347, 'grad_norm': 0.0, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 487/520 [30:29<02:05, 3.81s/it] {'loss': 2.0192, 'grad_norm': 0.0, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 488/520 [30:33<02:02, 3.83s/it] {'loss': 2.1552, 'grad_norm': 0.0, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 489/520 [30:37<01:58, 3.83s/it] {'loss': 1.7671, 'grad_norm': 0.0, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 490/520 [30:41<01:55, 3.84s/it] {'loss': 2.1193, 'grad_norm': 0.0, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 491/520 [30:45<01:51, 3.85s/it] {'loss': 2.1849, 'grad_norm': 0.0, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 492/520 [30:49<01:48, 3.86s/it] {'loss': 2.1631, 'grad_norm': 0.0, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 493/520 [30:52<01:44, 3.87s/it] {'loss': 1.9782, 'grad_norm': 0.0, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 494/520 [30:56<01:40, 3.87s/it] {'loss': 2.001, 'grad_norm': 0.0, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 495/520 [31:00<01:37, 3.89s/it] {'loss': 2.0557, 'grad_norm': 0.0, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 496/520 [31:04<01:33, 3.90s/it] {'loss': 2.1532, 'grad_norm': 0.0, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 497/520 [31:08<01:29, 3.89s/it] {'loss': 1.8146, 'grad_norm': 0.0, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 498/520 [31:12<01:25, 3.88s/it] {'loss': 2.109, 'grad_norm': 0.0, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 499/520 [31:16<01:21, 3.90s/it] {'loss': 2.0238, 'grad_norm': 0.0, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 500/520 [31:20<01:18, 3.90s/it] {'loss': 2.3196, 'grad_norm': 0.0, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 501/520 [31:24<01:14, 3.91s/it] {'loss': 2.0128, 'grad_norm': 0.0, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 502/520 [31:28<01:10, 3.90s/it] {'loss': 2.0904, 'grad_norm': 0.0, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 503/520 [31:31<01:06, 3.90s/it] {'loss': 1.888, 'grad_norm': 0.0, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 504/520 [31:35<01:02, 3.91s/it] {'loss': 2.3021, 'grad_norm': 0.0, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 505/520 [31:39<00:58, 3.91s/it] {'loss': 2.1435, 'grad_norm': 0.0, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 506/520 [31:43<00:54, 3.92s/it] {'loss': 2.1081, 'grad_norm': 0.0, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 507/520 [31:47<00:51, 3.92s/it] {'loss': 1.9223, 'grad_norm': 0.0, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 508/520 [31:51<00:46, 3.86s/it] {'loss': 2.1602, 'grad_norm': 0.0, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 509/520 [31:55<00:41, 3.80s/it] {'loss': 2.0725, 'grad_norm': 0.0, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 510/520 [31:58<00:37, 3.76s/it] {'loss': 2.1279, 'grad_norm': 0.0, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 511/520 [32:02<00:33, 3.73s/it] {'loss': 2.0474, 'grad_norm': 0.0, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 512/520 [32:06<00:29, 3.70s/it] {'loss': 2.0024, 'grad_norm': 0.0, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 513/520 [32:09<00:25, 3.70s/it] {'loss': 2.1995, 'grad_norm': 0.0, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 514/520 [32:13<00:22, 3.69s/it] {'loss': 2.0174, 'grad_norm': 0.0, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 515/520 [32:16<00:18, 3.66s/it] {'loss': 2.2217, 'grad_norm': 0.0, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 516/520 [32:20<00:14, 3.67s/it] {'loss': 2.1999, 'grad_norm': 0.0, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 517/520 [32:24<00:10, 3.65s/it] {'loss': 1.8719, 'grad_norm': 0.0, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 518/520 [32:27<00:07, 3.63s/it] {'loss': 2.1615, 'grad_norm': 0.0, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 519/520 [32:31<00:03, 3.63s/it] {'loss': 1.9508, 'grad_norm': 0.0, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 520/520 [32:35<00:00, 3.89s/it] {'loss': 1.7443, 'grad_norm': 0.0, 'learning_rate': 0.0, 'epoch': 1.0}
+ {'train_runtime': 1955.9961, 'train_samples_per_second': 34.013, 'train_steps_per_second': 0.266, 'train_loss': 2.066992656771953, 'epoch': 1.0}
+ 520/520 [32:35<00:00, 3.76s/it]
+[2025-10-13 16:55:57,322] [INFO] [launch.py:348:main] Process 888526 exits successfully.
+[2025-10-13 16:55:58,323] [INFO] [launch.py:348:main] Process 888525 exits successfully.
+[2025-10-13 16:55:58,324] [INFO] [launch.py:348:main] Process 888523 exits successfully.
+[2025-10-13 16:55:58,324] [INFO] [launch.py:348:main] Process 888522 exits successfully.
+[2025-10-13 16:55:58,324] [INFO] [launch.py:348:main] Process 888520 exits successfully.
+[2025-10-13 16:55:58,325] [INFO] [launch.py:348:main] Process 888524 exits successfully.
+[2025-10-13 16:55:58,325] [INFO] [launch.py:348:main] Process 888521 exits successfully.
+[2025-10-13 16:56:01,329] [INFO] [launch.py:348:main] Process 888519 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.7_2e-1_connector-5.0_0.7_2e-1_ablation_20251013_162143.log
+Timestamp: 2025-10-13 16:56:03
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation_20251013_165603.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation_20251013_165603.log
new file mode 100644
index 0000000000000000000000000000000000000000..bcf1001291a1357a695892b4970e8b0b6c01c32c
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation_20251013_165603.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation_20251013_165603.log
+Timestamp: 2025-10-13 16:56:03
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 16:56:06,487] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:56:09,451] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
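The learning_rate column in the run logged above can be reproduced from the scheduler flags visible in the launch command that follows (--lr_scheduler_type cosine, --learning_rate 2e-1, --warmup_ratio 0.03, 520 optimizer steps). Below is a minimal sketch, assuming the semantics of HF Transformers' get_cosine_schedule_with_warmup with ceil-rounded warmup steps; the function name lr_at is ours, but the spot checks against the logged values line up.

    import math

    PEAK_LR = 2e-1                                  # --learning_rate
    TOTAL_STEPS = 520                               # optimizer steps in one epoch at this batch size
    WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)    # --warmup_ratio 0.03 -> 16 steps

    def lr_at(step: int) -> float:
        # Linear warmup, then cosine decay to zero (the default cosine schedule).
        if step < WARMUP_STEPS:
            return PEAK_LR * step / WARMUP_STEPS
        progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(lr_at(352))  # ~0.05       (log at step 352/520: 0.050000000000000024)
    print(lr_at(519))  # ~1.9427e-06 (log at step 519/520: 1.9427068461808086e-06)
    print(lr_at(520))  # 0.0         (log at step 520/520: 0.0)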
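The train summary above is also internally consistent with the launcher output below: the deepspeed runner encodes its world view as base64 JSON in --world_info, and the throughput figures follow from the batch flags. A hedged cross-check (the sample count here is inferred from the logged throughput; llava_v1_5_mix665k at --train_data_ratio 0.1 gives roughly the same figure):

    import base64, json

    # --world_info from the runner cmd below decodes to the 8 local ranks:
    info = json.loads(base64.b64decode('eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119'))
    print(info)  # {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}

    world_size = len(info['localhost'])  # 8 GPUs
    global_batch = world_size * 4 * 4    # --per_device_train_batch_size 4 x --gradient_accumulation_steps 4 -> 128

    samples = round(34.013 * 1955.9961)  # train_samples_per_second * train_runtime -> ~66,529 samples
    steps = -(-samples // global_batch)  # ceil division -> 520, matching the 520/520 progress count
    print(global_batch, samples, steps, round(steps / 1955.9961, 3))  # 128 66529 520 0.266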
+[2025-10-13 16:56:09,452] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 0.9 --temperature_mlp_text 0.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 0.9 --temperature_mlp_vision 0.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 0.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import]
+[2025-10-13 16:56:12,049] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 16:56:13,083] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 16:56:13,083] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 16:56:13,083] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 16:56:13,083] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 16:56:13,083] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 16:56:13,083] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 16:56:13,083] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 16:56:13,086] [INFO] [launch.py:253:main] process 908624 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 16:56:13,088] [INFO]
[launch.py:253:main] process 908625 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:56:13,090] [INFO] [launch.py:253:main] process 908626 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:56:13,092] [INFO] [launch.py:253:main] process 908627 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:56:13,094] [INFO] [launch.py:253:main] process 908628 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:56:13,097] [INFO] [launch.py:253:main] process 908629 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:56:13,099] [INFO] [launch.py:253:main] process 908630 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 16:56:13,101] [INFO] [launch.py:253:main] process 908631 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-13 16:56:19,495] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:19,650] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:19,890] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:19,905] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,044] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,107] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:20,115] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:20,115] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:20,127] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:20,150] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 16:56:20,294] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,504] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,504] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-13 16:56:20,524] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,526] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,527] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 16:56:20,559] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.9, 'temperature_mlp': 0.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.9, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.9, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.9, + "temperature_mlp": 0.9, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+ywang29-vrdb-test1-worker-0:908624:908624 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908624:908624 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908624:908624 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908624:908624 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908624:908624 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:908624:908624 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:908629:908629 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908629:908629 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908629:908629 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908627:908627 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908627:908627 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908627:908627 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908629:908629 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908629:908629 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908629:908629 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:908627:908627 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908627:908627 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908627:908627 [3] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908625:908625 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908625:908625 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908625:908625 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908625:908625 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908625:908625 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908625:908625 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:908628:908628 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908628:908628 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908628:908628 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908628:908628 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908628:908628 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908628:908628 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:908631:908631 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908631:908631 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:908631:908631 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908631:908631 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908631:908631 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908631:908631 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:908630:908630 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908630:908630 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908630:908630 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908626:908626 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:908626:908626 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908626:908626 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908630:908630 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908630:908630 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908630:908630 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:908626:908626 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:908626:908626 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:908626:908626 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO ncclCommInitRank comm 0x55cb12f97ac0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO ncclCommInitRank comm 0x55dbed1fcf80 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO ncclCommInitRank comm 0x55e3a5068340 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO ncclCommInitRank comm 0x565381c0ffd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO ncclCommInitRank comm 0x55699dbabc50 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO ncclCommInitRank comm 0x55bf81f2a6c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO ncclCommInitRank comm 0x55a8566e1cc0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO ncclCommInitRank comm 0x5603a4335c80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x69f871b8f43a92d2 - Init START +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO NVLS 
multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO comm 0x55dbed1fcf80 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO comm 0x55bf81f2a6c0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO comm 0x55a8566e1cc0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO comm 0x565381c0ffd0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO comm 0x55699dbabc50 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO comm 0x55cb12f97ac0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO comm 0x5603a4335c80 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO comm 0x55e3a5068340 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 
[13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 
6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:910209 [4] 
NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO Channel 
05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
[NCCL channel setup elided: ranks 0-7 on ywang29-vrdb-test1-worker-0 connect the remaining forward ring channels (through 23/0, rank r -> r+1 with 7 wrapping to 0) and the full set of reverse channels (00/0-23/0, rank r -> r-1), all via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
[identical "Connected all rings" / "Connected all trees" / threadThresholds / channel-count messages from ranks 1-7 elided]
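These per-channel NCCL INFO lines are standard debug output emitted when NCCL_DEBUG=INFO is set in the environment. A minimal sketch of the kind of torch.distributed setup that produces them (hypothetical repro harness; the actual eval entry point is not shown in this log):

import os
import torch
import torch.distributed as dist

# Hypothetical 8-GPU repro, run as:
#   NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_debug_demo.py
def main():
    dist.init_process_group(backend="nccl")      # each rank ends with "ncclCommInitRank ... Init COMPLETE"
    local_rank = int(os.environ["LOCAL_RANK"])   # set by torchrun
    torch.cuda.set_device(local_rank)
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)                           # the first collective triggers the ring/tree channel setup logged above
    if dist.get_rank() == 0:
        print(f"world_size={dist.get_world_size()}, sum={x.item()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    main()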
+ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
[identical TUNER/Plugin messages from the other seven ranks elided]
+ywang29-vrdb-test1-worker-0:908628:910209 [4] NCCL INFO ncclCommInitRank comm 0x5603a4335c80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908630:910211 [6] NCCL INFO ncclCommInitRank comm 0x55cb12f97ac0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908631:910210 [7] NCCL INFO ncclCommInitRank comm 0x55699dbabc50 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908625:910208 [1] NCCL INFO ncclCommInitRank comm 0x55bf81f2a6c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908627:910207 [3] NCCL INFO ncclCommInitRank comm 0x565381c0ffd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908624:910205 [0] NCCL INFO ncclCommInitRank comm 0x55a8566e1cc0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908626:910212 [2] NCCL INFO ncclCommInitRank comm 0x55dbed1fcf80 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908629:910206 [5] NCCL INFO ncclCommInitRank comm 0x55e3a5068340 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x69f871b8f43a92d2 - Init COMPLETE
+[2025-10-13 16:57:06,585] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores',
'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 
'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
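For context on the warning above: the `.scores` entries are per-layer mask logits that the supermask-style masked linear layers add on top of the pretrained weights, so no base Qwen2 checkpoint can contain them and Transformers reports them as newly initialized on every rank. A minimal sketch of a soft-masked linear layer in that spirit (assumed and heavily simplified; the project's actual masked-linear implementation is not shown in this log):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):  # hypothetical stand-in for the project's masked linear layers
    def __init__(self, in_features, out_features, bias=True, temperature=1.0):
        super().__init__(in_features, out_features, bias=bias)
        # Extra learnable mask logits; absent from any pretrained checkpoint,
        # hence the "newly initialized: [... '.scores']" warning when loading.
        self.scores = nn.Parameter(torch.zeros_like(self.weight))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask: temperature-scaled sigmoid over the logits, applied
        # elementwise to the pretrained weight matrix.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)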
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 16:57:08,364] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
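Every linear projection in the language model and the connector is wrapped in `SupermaskLinearSparsity_SoftForward_Normal`; its implementation is not shown in this log. A minimal sketch of a soft-forward supermask linear layer consistent with the config above (`mask_type: soft`, temperature 1.3, frozen weights, learned per-weight scores); the class and argument names are illustrative, not the repo's:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftSupermaskLinear(nn.Linear):
        """Soft supermask sketch: the pretrained weight is frozen and a
        per-weight score tensor is learned; the forward pass scales each
        weight by sigmoid(score / temperature)."""

        def __init__(self, in_features, out_features, bias=True, temperature=1.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
            self.weight.requires_grad = False  # only the scores train
            if self.bias is not None:
                self.bias.requires_grad = False

        def forward(self, x):
            mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
            return F.linear(x, self.weight * mask, self.bias)

With scores initialized at 5.0 (see the init lines below), sigmoid(5.0 / 1.3) is roughly 0.98, so the masked layer starts out close to the unmasked pretrained layer and training then learns which weights to attenuate.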
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000
+Pre-training init connector._connector.0.scores: Mean=5.000005
+Pre-training init connector._connector.2.scores: Mean=4.999970
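All language-model score tensors above report a mean of exactly 5.000000, consistent with a constant fill; the connector means (5.000005 and 4.999970) sit just off 5.0, which would fit a small random perturbation on top of the constant, though the log alone cannot confirm that. A sketch of an init pass that would produce these lines (the helper name is illustrative, not the repo's):

    import torch

    def init_mask_scores(model, value=5.0):
        # Fill every mask-score tensor with a constant and log its mean,
        # mirroring the "Pre-training init ... Mean=..." lines above.
        for name, param in model.named_parameters():
            if name.endswith(".scores"):
                with torch.no_grad():
                    param.fill_(value)
                print(f"Pre-training init {name}: Mean={param.float().mean().item():.6f}")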
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
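The mask-tuning stage trains on a 10% random subset: int(665298 * 0.1) = 66529, matching the logged count, and an identical sample on every rank implies a shared seed (the 42 in the run name is plausibly it, though the log does not say). A sketch of such a subsample (function name and seed handling are illustrative):

    import random

    def sample_subset(dataset_size=665298, fraction=0.1, seed=42):
        k = int(dataset_size * fraction)  # 66529
        rng = random.Random(seed)         # a fixed seed keeps all ranks in sync
        indices = rng.sample(range(dataset_size), k)
        print(f"Randomly sampled {k} training samples "
              f"({fraction:.1%} of {dataset_size} total samples)")
        return indices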
+2025-10-13 16:57:26,142 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
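The trainable set is exactly the mask scores, one score per frozen weight entry, so the logged total can be checked against the layer shapes in the module dump above (896 hidden size, 128-dim k/v projections, 4864 MLP width, 1152-dim vision features feeding the connector); the per-tensor breakdown follows below:

    # Arithmetic check of the logged trainable total.
    q = o = 896 * 896            # 802816 scores each
    k = v = 896 * 128            # 114688 scores each
    mlp = 896 * 4864             # 4358144 scores per MLP projection
    per_layer = q + k + v + o + 3 * mlp       # 14909440
    connector = 1152 * 896 + 896 * 896        # 1032192 + 802816
    total = 24 * per_layer + connector
    assert total == 359661568                 # matches "Total Trainable Parameters"
    print(total)

That is about 28% of the 1,283,756,736 total parameters.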
+2025-10-13 16:57:26,155 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in
403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 
3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL 
INFO Connected all trees
+ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:908624:915136 [0] NCCL INFO ncclCommInitRank comm 0x7f60b406b230 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908628:915141 [4] NCCL INFO ncclCommInitRank comm 0x7fa7ec06ade0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908629:915137 [5] NCCL INFO ncclCommInitRank comm 0x7f5e5406b5d0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908631:915139 [7] NCCL INFO ncclCommInitRank comm 0x7f1d1c06aff0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908627:915138 [3] NCCL INFO ncclCommInitRank comm 0x7f40f806acc0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908625:915140 [1] NCCL INFO ncclCommInitRank comm 0x7f7d2406b1e0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908626:915142 [2] NCCL INFO ncclCommInitRank comm 0x7ff70806a940 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:908630:915143 [6] NCCL INFO ncclCommInitRank comm 0x7fe6c006b2b0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xf1b4256c8ae38fb1 - Init COMPLETE
+ 0%| | 1/520 [00:28<4:06:50, 28.54s/it] {'loss': 2.0453, 'grad_norm': 0.0026972364646064857, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520 [00:28<4:06:50, 28.54s/it] 0%| | 2/520 [00:32<1:59:48, 13.88s/it] {'loss': 2.0549, 'grad_norm': 0.0029274710954265495, 'learning_rate': 0.025, 'epoch': 0.0}
+ 0%| | 2/520 [00:32<1:59:48, 13.88s/it] 1%| | 3/520 [00:35<1:19:17, 9.20s/it] {'loss': 2.1899, 'grad_norm': 0.003350028816559514, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 3/520 [00:35<1:19:17, 9.20s/it] 1%| | 4/520 [00:39<1:00:17, 7.01s/it] {'loss': 2.0656, 'grad_norm': 0.0027688007506609775, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 4/520 [00:39<1:00:17, 7.01s/it] 1%| | 5/520 [00:43<49:47, 5.80s/it] {'loss': 2.2333, 'grad_norm': 0.0030575897334033475, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 5/520 [00:43<49:47, 5.80s/it] 1%| | 6/520 [00:46<43:26, 5.07s/it] {'loss': 1.6754, 'grad_norm': 0.0015633539324223973, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%| | 6/520 [00:46<43:26, 5.07s/it] 1%|▏ | 7/520 [00:50<39:15, 4.59s/it] {'loss': 2.0776, 'grad_norm': 0.0030206776077081167, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:50<39:15, 4.59s/it] 2%|▏ | 8/520 [00:54<38:20, 4.49s/it] {'loss': 2.0541, 'grad_norm': 0.0025501506776947473, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 8/520 [00:54<38:20, 4.49s/it] 2%|▏ | 9/520 [00:58<37:30, 4.40s/it] {'loss': 2.0172, 'grad_norm': 0.0023814515334927527, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:58<37:30, 4.40s/it] 2%|▏ | 10/520 [01:02<35:29, 4.17s/it] {'loss': 1.6533, 'grad_norm': 0.002240399350097167, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [01:02<35:29, 4.17s/it] 2%|▏ | 11/520 [01:06<34:19, 4.05s/it] {'loss': 1.6096, 'grad_norm': 0.001287970222016225, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [01:06<34:19, 4.05s/it] 2%|▏ | 12/520 [01:09<33:13, 3.92s/it] {'loss': 1.4289, 'grad_norm': 0.0006723411344214017, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [01:09<33:13, 3.92s/it][2025-10-13 16:58:45,302] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:14<34:09, 4.04s/it] {'loss': 1.5469, 'grad_norm': 0.0007924317154241557, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [01:14<34:09, 4.04s/it] 3%|▎ | 14/520 [01:17<33:00, 3.91s/it] {'loss': 1.5563, 'grad_norm': 0.0006367597490377345, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:17<33:00, 3.91s/it] 3%|▎ | 15/520 [01:21<32:14, 3.83s/it] {'loss': 1.455, 'grad_norm': 0.0005868919432736048, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:21<32:14, 3.83s/it] 3%|▎ | 16/520 [01:25<31:42, 3.77s/it] {'loss': 1.4289, 'grad_norm': 0.0005543009937923737, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:25<31:42, 3.77s/it] 3%|▎ | 17/520 [01:28<31:17, 3.73s/it] {'loss': 1.5399, 'grad_norm': 0.0005137164539134108, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:28<31:17, 3.73s/it] 3%|▎ | 18/520 [01:32<30:56, 3.70s/it] {'loss': 1.3939, 'grad_norm': 0.0005621725462625708, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:32<30:56, 3.70s/it] 4%|▎ | 19/520 [01:35<30:41, 3.68s/it] {'loss': 1.3849, 'grad_norm': 0.00047486194245234265, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:36<30:41, 3.68s/it] 4%|▍ | 20/520 [01:39<30:33, 3.67s/it] {'loss': 1.3586, 'grad_norm': 0.0005729980708733766, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:39<30:33, 3.67s/it] 4%|▍ | 21/520 [01:43<30:54, 3.72s/it] {'loss': 1.3759, 'grad_norm': 0.0006198736150154538, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:43<30:54, 3.72s/it] 4%|▍ | 22/520 [01:47<31:01, 3.74s/it] {'loss': 1.4853, 'grad_norm': 0.000602018256877278, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:47<31:01, 3.74s/it] 4%|▍ | 23/520 [01:51<31:11, 3.77s/it] {'loss': 1.4176, 'grad_norm': 0.0006310441241431508, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:51<31:11, 3.77s/it] 5%|▍ | 24/520 [01:54<31:12, 3.77s/it] {'loss': 1.3342, 'grad_norm': 0.000691983498741663, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 24/520 [01:54<31:12, 3.77s/it] 5%|▍ | 25/520 [01:58<31:15, 3.79s/it] {'loss': 1.423, 'grad_norm': 0.0007195008320095756, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:58<31:15, 3.79s/it] 5%|▌ | 26/520 [02:02<31:13, 3.79s/it] {'loss': 1.3526, 'grad_norm': 0.0006105896215258166, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 26/520 [02:02<31:13, 3.79s/it] 5%|▌ | 27/520 [02:06<31:12, 3.80s/it] {'loss': 1.2843, 'grad_norm': 0.0006652738217893547, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 27/520 [02:06<31:12, 3.80s/it] 5%|▌ | 28/520 [02:10<31:10, 3.80s/it] {'loss': 1.3106, 'grad_norm': 0.000706715090513927, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 5%|▌ | 28/520 [02:10<31:10, 3.80s/it] 6%|▌ | 29/520 [02:13<31:08, 3.81s/it] {'loss': 1.3272, 'grad_norm': 0.0007235315406201485, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 29/520 [02:13<31:08, 3.81s/it] 6%|▌ | 30/520 [02:17<31:04, 3.81s/it] {'loss': 1.3916, 'grad_norm': 0.0006276974452785667, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:17<31:04, 3.81s/it] 6%|▌ | 31/520 [02:21<30:48, 3.78s/it] {'loss': 1.2881, 'grad_norm': 0.0006272600260167917, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
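The stage3.py warning above recommends flushing the CUDA allocator cache at the same point on every rank. A minimal sketch of that mitigation, assuming a DeepSpeed engine and dataloader named `engine` and `train_loader` (both hypothetical placeholders; only `get_accelerator` is DeepSpeed's real accelerator abstraction):

```python
# Hedged sketch of the mitigation suggested by the stage3.py warning:
# empty the allocator cache periodically, at the same step on all ranks.
from deepspeed.accelerator import get_accelerator

def train_loop(engine, train_loader, flush_every=100):  # hypothetical names
    for step, batch in enumerate(train_loader):
        loss = engine(**batch).loss   # forward pass through the engine
        engine.backward(loss)
        engine.step()
        # Every rank reaches this line at the same step, so the caches are
        # flushed together instead of one rank stalling the others under
        # memory pressure.
        if step % flush_every == 0:
            get_accelerator().empty_cache()
```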
+ 6%|▌ | 31/520 [02:21<30:48, 3.78s/it] 6%|▌ | 32/520 [02:25<30:31, 3.75s/it] {'loss': 1.2227, 'grad_norm': 0.000696530630456894, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:25<30:31, 3.75s/it] 6%|▋ | 33/520 [02:28<30:13, 3.72s/it] {'loss': 1.2831, 'grad_norm': 0.000804300032276725, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:28<30:13, 3.72s/it] 7%|▋ | 34/520 [02:32<29:58, 3.70s/it] {'loss': 1.2735, 'grad_norm': 0.0007923284546763958, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 34/520 [02:32<29:58, 3.70s/it] 7%|▋ | 35/520 [02:36<29:46, 3.68s/it] {'loss': 1.2856, 'grad_norm': 0.0008605048575912531, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:36<29:46, 3.68s/it] 7%|▋ | 36/520 [02:39<29:39, 3.68s/it] {'loss': 1.3758, 'grad_norm': 0.0007250638414440763, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:39<29:39, 3.68s/it] 7%|▋ | 37/520 [02:43<29:30, 3.67s/it] {'loss': 1.3596, 'grad_norm': 0.0007213324427744202, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:43<29:30, 3.67s/it] 7%|▋ | 38/520 [02:47<29:25, 3.66s/it] {'loss': 1.4426, 'grad_norm': 0.0007466207003900771, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:47<29:25, 3.66s/it] 8%|▊ | 39/520 [02:50<29:16, 3.65s/it] {'loss': 1.318, 'grad_norm': 0.0009259413473778862, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:50<29:16, 3.65s/it] 8%|▊ | 40/520 [02:54<29:11, 3.65s/it] {'loss': 1.3391, 'grad_norm': 0.000719071614396786, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 40/520 [02:54<29:11, 3.65s/it] 8%|▊ | 41/520 [02:57<29:06, 3.65s/it] {'loss': 1.3142, 'grad_norm': 0.0007678479068083422, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:57<29:06, 3.65s/it] 8%|▊ | 42/520 [03:01<29:02, 3.65s/it] {'loss': 1.3113, 'grad_norm': 0.0009788541523643168, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 42/520 [03:01<29:02, 3.65s/it] 8%|▊ | 43/520 [03:05<29:02, 3.65s/it] {'loss': 1.2452, 'grad_norm': 0.0007071502481956971, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 43/520 [03:05<29:02, 3.65s/it] 8%|▊ | 44/520 [03:08<29:00, 3.66s/it] {'loss': 1.3484, 'grad_norm': 0.0007873928589338684, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 8%|▊ | 44/520 [03:08<29:00, 3.66s/it] 9%|▊ | 45/520 [03:12<28:58, 3.66s/it] {'loss': 1.3327, 'grad_norm': 0.0008367000280722047, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▊ | 45/520 [03:12<28:58, 3.66s/it] 9%|▉ | 46/520 [03:16<28:51, 3.65s/it] {'loss': 1.3866, 'grad_norm': 0.0008375766916045692, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:16<28:51, 3.65s/it] 9%|▉ | 47/520 [03:19<28:58, 3.68s/it] {'loss': 1.3022, 'grad_norm': 0.000850721904544807, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:19<28:58, 3.68s/it] 9%|▉ | 48/520 [03:23<28:50, 3.67s/it] {'loss': 1.3002, 'grad_norm': 0.00097849276316032, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:23<28:50, 3.67s/it] 9%|▉ | 49/520 [03:27<28:56, 3.69s/it] {'loss': 1.3361, 'grad_norm': 0.000920247605598531, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:27<28:56, 3.69s/it] 10%|▉ | 50/520 [03:31<29:01, 3.71s/it] {'loss': 1.3231, 'grad_norm': 0.0008669005356822797, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 10%|▉ | 50/520 [03:31<29:01, 3.71s/it] 10%|▉ | 51/520 [03:34<28:48, 3.69s/it] {'loss': 1.2658, 'grad_norm': 0.0009483025508463598, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:34<28:48, 3.69s/it] 10%|█ | 52/520 [03:38<28:35, 3.67s/it] {'loss': 1.3943, 'grad_norm': 0.0010092751032161201, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:38<28:35, 3.67s/it] 10%|█ | 53/520 [03:42<28:27, 3.66s/it] {'loss': 1.3703, 'grad_norm': 0.0009393261235489063, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:42<28:27, 3.66s/it] 10%|█ | 54/520 [03:45<28:26, 3.66s/it] {'loss': 1.3057, 'grad_norm': 0.0008838359946123372, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:45<28:26, 3.66s/it] 11%|█ | 55/520 [03:49<28:25, 3.67s/it] {'loss': 1.264, 'grad_norm': 0.000982038240214157, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+ 11%|█ | 55/520 [03:49<28:25, 3.67s/it] 11%|█ | 56/520 [03:53<28:17, 3.66s/it] {'loss': 1.3873, 'grad_norm': 0.0009013314379422694, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:53<28:17, 3.66s/it] 11%|█ | 57/520 [03:56<28:08, 3.65s/it] {'loss': 1.251, 'grad_norm': 0.0010210876575527787, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:56<28:08, 3.65s/it] 11%|█ | 58/520 [04:00<28:14, 3.67s/it] {'loss': 1.4074, 'grad_norm': 0.0008041626085128515, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+ 11%|█ | 58/520 [04:00<28:14, 3.67s/it] 11%|█▏ | 59/520 [04:04<28:29, 3.71s/it] {'loss': 1.2225, 'grad_norm': 0.0008900332368967478, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [04:04<28:29, 3.71s/it] 12%|█▏ | 60/520 [04:07<28:21, 3.70s/it] {'loss': 1.3124, 'grad_norm': 0.0008652187860769725, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+ 12%|█▏ | 60/520 [04:07<28:21, 3.70s/it] 12%|█▏ | 61/520 [04:11<28:12, 3.69s/it] {'loss': 1.2984, 'grad_norm': 0.0009514784456188759, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [04:11<28:12, 3.69s/it] 12%|█▏ | 62/520 [04:15<28:02, 3.67s/it] {'loss': 1.3009, 'grad_norm': 0.0010162779410907068, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:15<28:02, 3.67s/it] 12%|█▏ | 63/520 [04:18<27:55, 3.67s/it] {'loss': 1.2919, 'grad_norm': 0.0009282885016325975, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:18<27:55, 3.67s/it] 12%|█▏ | 64/520 [04:22<27:50, 3.66s/it] {'loss': 1.3173, 'grad_norm': 0.0009591172881069826, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:22<27:50, 3.66s/it] 12%|█▎ | 65/520 [04:26<27:52, 3.67s/it] {'loss': 1.3249, 'grad_norm': 0.0011325819506356105, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:26<27:52, 3.67s/it] 13%|█▎ | 66/520 [04:29<27:45, 3.67s/it] {'loss': 1.2754, 'grad_norm': 0.0008972890686472682, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+ 13%|█▎ | 66/520 [04:29<27:45, 3.67s/it] 13%|█▎ | 67/520 [04:33<27:55, 3.70s/it] {'loss': 1.1862, 'grad_norm': 0.0009338188672561959, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:33<27:55, 3.70s/it] 13%|█▎ | 68/520 [04:37<28:04, 3.73s/it] {'loss': 1.2572, 'grad_norm': 0.0009657083461779261, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:37<28:04, 3.73s/it] 13%|█▎ | 69/520 [04:41<28:12, 3.75s/it] {'loss': 1.2413, 'grad_norm': 0.0010338867329390392, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:41<28:12, 3.75s/it] 13%|█▎ | 70/520 [04:44<28:17, 3.77s/it] {'loss': 1.2585, 'grad_norm': 0.0010159067061614976, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:44<28:17, 3.77s/it] 14%|█▎ | 71/520 [04:48<28:16, 3.78s/it] {'loss': 1.2071, 'grad_norm': 0.0009023317682087867, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+ 14%|█▎ | 71/520 [04:48<28:16, 3.78s/it] 14%|█▍ | 72/520 [04:52<28:03, 3.76s/it] {'loss': 1.3569, 'grad_norm': 0.0009587593108652638, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:52<28:03, 3.76s/it] 14%|█▍ | 73/520 [04:56<27:43, 3.72s/it] {'loss': 1.1837, 'grad_norm': 0.0009359016594848344, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:56<27:43, 3.72s/it] 14%|█▍ | 74/520 [04:59<27:27, 3.69s/it] {'loss': 1.2871, 'grad_norm': 0.0010266413232407274, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:59<27:27, 3.69s/it] 14%|█▍ | 75/520 [05:03<27:15, 3.68s/it] {'loss': 1.2103, 'grad_norm': 0.0008781897589402395, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [05:03<27:15, 3.68s/it] 15%|█▍ | 76/520 [05:07<27:05, 3.66s/it] {'loss': 1.3419, 'grad_norm': 0.0008132788048403225, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+ 15%|█▍ | 76/520 [05:07<27:05, 3.66s/it] 15%|█▍ | 77/520 [05:10<26:59, 3.66s/it] {'loss': 1.1324, 'grad_norm': 0.0010503311766854031, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [05:10<26:59, 3.66s/it] 15%|█▌ | 78/520 [05:14<26:56, 3.66s/it] {'loss': 1.2438, 'grad_norm': 0.0009638610435058072, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:14<26:56, 3.66s/it] 15%|█▌ | 79/520 [05:17<26:47, 3.65s/it] {'loss': 1.233, 'grad_norm': 0.0009546184765495405, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:17<26:47, 3.65s/it] 15%|█▌ | 80/520 [05:21<26:43, 3.64s/it] {'loss': 1.3267, 'grad_norm': 0.0009853242967151742, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:21<26:43, 3.64s/it] 16%|█▌ | 81/520 [05:25<26:42, 3.65s/it] {'loss': 1.3656, 'grad_norm': 0.0012396141037646692, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+ 16%|█▌ | 81/520 [05:25<26:42, 3.65s/it] 16%|█▌ | 82/520 [05:28<26:35, 3.64s/it] {'loss': 1.297, 'grad_norm': 0.0009555174507732951, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:28<26:35, 3.64s/it] 16%|█▌ | 83/520 [05:32<26:36, 3.65s/it] {'loss': 1.3075, 'grad_norm': 0.0010099582304372275, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:32<26:36, 3.65s/it] 16%|█▌ | 84/520 [05:36<26:30, 3.65s/it] {'loss': 1.323, 'grad_norm': 0.0010252483167816858, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:36<26:30, 3.65s/it] 16%|█▋ | 85/520 [05:39<26:23, 3.64s/it] {'loss': 1.3608, 'grad_norm': 0.0009686314307520678, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:39<26:23, 3.64s/it] 17%|█▋ | 86/520 [05:43<26:18, 3.64s/it] {'loss': 1.3545, 'grad_norm': 0.0009660896465080393, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+ 17%|█▋ | 86/520 [05:43<26:18, 3.64s/it] 17%|█▋ | 87/520 [05:47<26:16, 3.64s/it] {'loss': 1.2728, 'grad_norm': 0.0009390646849995877, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:47<26:16, 3.64s/it] 17%|█▋ | 88/520 [05:50<26:17, 3.65s/it] {'loss': 1.2194, 'grad_norm': 0.0007570176962731095, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
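Each optimizer step above prints a literal Python dict of metrics after the progress bar. A small helper sketch (not part of this run; the log path is a placeholder) for pulling those dicts out of a log like this one for plotting:

```python
# Sketch: extract the per-step metric dicts ({'loss': ..., 'grad_norm': ...,
# 'learning_rate': ..., 'epoch': ...}) from a trainer log of this format.
import ast
import re

STEP_RE = re.compile(r"\{'loss':[^{}]*\}")

def parse_metrics(path):
    rows = []
    with open(path) as f:
        for line in f:
            for span in STEP_RE.findall(line):
                rows.append(ast.literal_eval(span))  # dicts are literal Python
    return rows

rows = parse_metrics("training.log")  # placeholder path, not this run's file name
losses = [row["loss"] for row in rows]
```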
+ 17%|█▋ | 88/520 [05:50<26:17, 3.65s/it] 17%|█▋ | 89/520 [05:54<26:15, 3.65s/it] {'loss': 1.3136, 'grad_norm': 0.0010170795840986654, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [05:54<26:15, 3.65s/it] 17%|█▋ | 90/520 [05:58<26:26, 3.69s/it] {'loss': 1.2501, 'grad_norm': 0.0009943313407911204, 'learning_rate': 0.1895489924657301, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [05:58<26:26, 3.69s/it] 18%|█▊ | 91/520 [06:01<26:36, 3.72s/it] {'loss': 1.3166, 'grad_norm': 0.000904483897367191, 'learning_rate': 0.18926982092436118, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [06:01<26:36, 3.72s/it] 18%|█▊ | 92/520 [06:05<26:39, 3.74s/it] {'loss': 1.2593, 'grad_norm': 0.0010523406300754472, 'learning_rate': 0.18898718088114688, 'epoch': 0.18}
+ 18%|█▊ | 92/520 [06:05<26:39, 3.74s/it] 18%|█▊ | 93/520 [06:09<26:39, 3.75s/it] {'loss': 1.2623, 'grad_norm': 0.0010012572804241377, 'learning_rate': 0.18870108331782218, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [06:09<26:39, 3.75s/it] 18%|█▊ | 94/520 [06:13<26:45, 3.77s/it] {'loss': 1.3423, 'grad_norm': 0.0009459553005836432, 'learning_rate': 0.18841153935046098, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:13<26:45, 3.77s/it] 18%|█▊ | 95/520 [06:17<26:41, 3.77s/it] {'loss': 1.2458, 'grad_norm': 0.0011297643370693138, 'learning_rate': 0.18811856022904425, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:17<26:41, 3.77s/it] 18%|█▊ | 96/520 [06:20<26:45, 3.79s/it] {'loss': 1.2671, 'grad_norm': 0.0008762091035212887, 'learning_rate': 0.18782215733702287, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:20<26:45, 3.79s/it] 19%|█▊ | 97/520 [06:24<26:44, 3.79s/it] {'loss': 1.2337, 'grad_norm': 0.001091188424717688, 'learning_rate': 0.18752234219087538, 'epoch': 0.19}
+ 19%|█▊ | 97/520 [06:24<26:44, 3.79s/it] 19%|█▉ | 98/520 [06:28<26:46, 3.81s/it] {'loss': 1.2375, 'grad_norm': 0.0008268938077398125, 'learning_rate': 0.18721912643966054, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:28<26:46, 3.81s/it] 19%|█▉ | 99/520 [06:32<26:35, 3.79s/it] {'loss': 1.2366, 'grad_norm': 0.0010118133808673362, 'learning_rate': 0.18691252186456464, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:32<26:35, 3.79s/it] 19%|█▉ | 100/520 [06:36<26:17, 3.75s/it] {'loss': 1.2224, 'grad_norm': 0.0008679845443476143, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:36<26:17, 3.75s/it] 19%|█▉ | 101/520 [06:39<25:59, 3.72s/it] {'loss': 1.2583, 'grad_norm': 0.0009680862519405732, 'learning_rate': 0.18628919402536132, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:39<25:59, 3.72s/it] 20%|█▉ | 102/520 [06:43<25:46, 3.70s/it] {'loss': 1.2603, 'grad_norm': 0.0010059964059244747, 'learning_rate': 0.18597249498011903, 'epoch': 0.2}
+ 20%|█▉ | 102/520 [06:43<25:46, 3.70s/it] 20%|█▉ | 103/520 [06:46<25:36, 3.68s/it] {'loss': 1.1882, 'grad_norm': 0.0008772393172813465, 'learning_rate': 0.18565245554778517, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:46<25:36, 3.68s/it] 20%|██ | 104/520 [06:50<25:28, 3.67s/it] {'loss': 1.2638, 'grad_norm': 0.0009644439472313792, 'learning_rate': 0.18532908816321558, 'epoch': 0.2}
+ 20%|██ | 104/520 [06:50<25:28, 3.67s/it] 20%|██ | 105/520 [06:54<25:52, 3.74s/it] {'loss': 1.2527, 'grad_norm': 0.0009046060898446635, 'learning_rate': 0.18500240539057092, 'epoch': 0.2}
+ 20%|██ | 105/520 [06:54<25:52, 3.74s/it] 20%|██ | 106/520 [06:58<26:12, 3.80s/it] {'loss': 1.2529, 'grad_norm': 0.000866328472145276, 'learning_rate': 0.18467241992282843, 'epoch': 0.2}
+ 20%|██ | 106/520 [06:58<26:12, 3.80s/it] 21%|██ | 107/520 [07:02<26:22, 3.83s/it] {'loss': 1.2211, 'grad_norm': 0.000900325518889216, 'learning_rate': 0.18433914458128858, 'epoch': 0.21}
+ 21%|██ | 107/520 [07:02<26:22, 3.83s/it] 21%|██ | 108/520 [07:06<26:28, 3.86s/it] {'loss': 1.2126, 'grad_norm': 0.0010194020320194348, 'learning_rate': 0.18400259231507718, 'epoch': 0.21}
+ 21%|██ | 108/520 [07:06<26:28, 3.86s/it] 21%|██ | 109/520 [07:10<26:41, 3.90s/it] {'loss': 1.2022, 'grad_norm': 0.0008214820081771782, 'learning_rate': 0.18366277620064198, 'epoch': 0.21}
+ 21%|██ | 109/520 [07:10<26:41, 3.90s/it] 21%|██ | 110/520 [07:14<26:34, 3.89s/it] {'loss': 1.3916, 'grad_norm': 0.0009594831145293846, 'learning_rate': 0.1833197094412449, 'epoch': 0.21}
+ 21%|██ | 110/520 [07:14<26:34, 3.89s/it] 21%|██▏ | 111/520 [07:17<26:26, 3.88s/it] {'loss': 1.3857, 'grad_norm': 0.0009947707603519184, 'learning_rate': 0.18297340536644877, 'epoch': 0.21}
+ 21%|██▏ | 111/520 [07:17<26:26, 3.88s/it] 22%|██▏ | 112/520 [07:21<26:21, 3.88s/it] {'loss': 1.2758, 'grad_norm': 0.0008977369816807874, 'learning_rate': 0.1826238774315995, 'epoch': 0.22}
+ 22%|██▏ | 112/520 [07:21<26:21, 3.88s/it] 22%|██▏ | 113/520 [07:25<26:24, 3.89s/it] {'loss': 1.1708, 'grad_norm': 0.0008899006305054124, 'learning_rate': 0.18227113921730334, 'epoch': 0.22}
+ 22%|██▏ | 113/520 [07:25<26:24, 3.89s/it] 22%|██▏ | 114/520 [07:29<25:58, 3.84s/it] {'loss': 1.2662, 'grad_norm': 0.0009022876727523264, 'learning_rate': 0.1819152044288992, 'epoch': 0.22}
+ 22%|██▏ | 114/520 [07:29<25:58, 3.84s/it] 22%|██▏ | 115/520 [07:33<25:33, 3.79s/it] {'loss': 1.3548, 'grad_norm': 0.0009068541648380708, 'learning_rate': 0.18155608689592603, 'epoch': 0.22}
+ 22%|██▏ | 115/520 [07:33<25:33, 3.79s/it] 22%|██▏ | 116/520 [07:36<25:13, 3.75s/it] {'loss': 1.3673, 'grad_norm': 0.0008658751812305333, 'learning_rate': 0.18119380057158568, 'epoch': 0.22}
+ 22%|██▏ | 116/520 [07:36<25:13, 3.75s/it] 22%|██▎ | 117/520 [07:40<25:00, 3.72s/it] {'loss': 1.3273, 'grad_norm': 0.0009364213605468873, 'learning_rate': 0.18082835953220056, 'epoch': 0.23}
+ 22%|██▎ | 117/520 [07:40<25:00, 3.72s/it] 23%|██▎ | 118/520 [07:44<24:48, 3.70s/it] {'loss': 1.2576, 'grad_norm': 0.0008630735606008308, 'learning_rate': 0.18045977797666685, 'epoch': 0.23}
+ 23%|██▎ | 118/520 [07:44<24:48, 3.70s/it] 23%|██▎ | 119/520 [07:47<24:35, 3.68s/it] {'loss': 1.2175, 'grad_norm': 0.0009284683251411882, 'learning_rate': 0.1800880702259028, 'epoch': 0.23}
+ 23%|██▎ | 119/520 [07:47<24:35, 3.68s/it] 23%|██▎ | 120/520 [07:51<24:26, 3.67s/it] {'loss': 1.2267, 'grad_norm': 0.0010173336857649611, 'learning_rate': 0.17971325072229227, 'epoch': 0.23}
+ 23%|██▎ | 120/520 [07:51<24:26, 3.67s/it] 23%|██▎ | 121/520 [07:55<24:22, 3.66s/it] {'loss': 1.2703, 'grad_norm': 0.0010100481393459705, 'learning_rate': 0.17933533402912352, 'epoch': 0.23}
+ 23%|██▎ | 121/520 [07:55<24:22, 3.66s/it] 23%|██▎ | 122/520 [07:58<24:17, 3.66s/it] {'loss': 1.1886, 'grad_norm': 0.0008585187464051632, 'learning_rate': 0.17895433483002354, 'epoch': 0.23}
+ 23%|██▎ | 122/520 [07:58<24:17, 3.66s/it] 24%|██▎ | 123/520 [08:02<24:17, 3.67s/it] {'loss': 1.2889, 'grad_norm': 0.0008813259825981198, 'learning_rate': 0.17857026792838737, 'epoch': 0.24}
+ 24%|██▎ | 123/520 [08:02<24:17, 3.67s/it] 24%|██▍ | 124/520 [08:06<24:14, 3.67s/it] {'loss': 1.241, 'grad_norm': 0.0009987403957947341, 'learning_rate': 0.178183148246803, 'epoch': 0.24}
+ 24%|██▍ | 124/520 [08:06<24:14, 3.67s/it] 24%|██▍ | 125/520 [08:09<24:14, 3.68s/it] {'loss': 1.2419, 'grad_norm': 0.0008965931552584962, 'learning_rate': 0.1777929908264715, 'epoch': 0.24}
+ 24%|██▍ | 125/520 [08:09<24:14, 3.68s/it] 24%|██▍ | 126/520 [08:14<25:34, 3.89s/it] {'loss': 1.2246, 'grad_norm': 0.0007819135408188089, 'learning_rate': 0.17739981082662276, 'epoch': 0.24}
+ 24%|██▍ | 126/520 [08:14<25:34, 3.89s/it] 24%|██▍ | 127/520 [08:17<25:05, 3.83s/it] {'loss': 1.2193, 'grad_norm': 0.0010757759807353184, 'learning_rate': 0.1770036235239263, 'epoch': 0.24}
+ 24%|██▍ | 127/520 [08:17<25:05, 3.83s/it] 25%|██▍ | 128/520 [08:21<24:44, 3.79s/it] {'loss': 1.2615, 'grad_norm': 0.0009525199562382111, 'learning_rate': 0.1766044443118978, 'epoch': 0.25}
+ 25%|██▍ | 128/520 [08:21<24:44, 3.79s/it] 25%|██▍ | 129/520 [08:25<24:30, 3.76s/it] {'loss': 1.2288, 'grad_norm': 0.0008305074213657533, 'learning_rate': 0.17620228870030108, 'epoch': 0.25}
+ 25%|██▍ | 129/520 [08:25<24:30, 3.76s/it] 25%|██▌ | 130/520 [08:28<24:10, 3.72s/it] {'loss': 1.2517, 'grad_norm': 0.0008078788203278338, 'learning_rate': 0.1757971723145453, 'epoch': 0.25}
+ 25%|██▌ | 130/520 [08:28<24:10, 3.72s/it] 25%|██▌ | 131/520 [08:32<24:00, 3.70s/it] {'loss': 1.1903, 'grad_norm': 0.000816138653991652, 'learning_rate': 0.175389110895078, 'epoch': 0.25}
+ 25%|██▌ | 131/520 [08:32<24:00, 3.70s/it] 25%|██▌ | 132/520 [08:36<23:50, 3.69s/it] {'loss': 1.3018, 'grad_norm': 0.0009930012260895517, 'learning_rate': 0.17497812029677343, 'epoch': 0.25}
+ 25%|██▌ | 132/520 [08:36<23:50, 3.69s/it] 26%|██▌ | 133/520 [08:39<23:40, 3.67s/it] {'loss': 1.2227, 'grad_norm': 0.001009738059067096, 'learning_rate': 0.17456421648831655, 'epoch': 0.26}
+ 26%|██▌ | 133/520 [08:39<23:40, 3.67s/it] 26%|██▌ | 134/520 [08:43<23:37, 3.67s/it] {'loss': 1.2945, 'grad_norm': 0.0009096430452482086, 'learning_rate': 0.17414741555158267, 'epoch': 0.26}
+ 26%|██▌ | 134/520 [08:43<23:37, 3.67s/it] 26%|██▌ | 135/520 [08:47<23:29, 3.66s/it] {'loss': 1.342, 'grad_norm': 0.0009091182630025232, 'learning_rate': 0.1737277336810124, 'epoch': 0.26}
+ 26%|██▌ | 135/520 [08:47<23:29, 3.66s/it] 26%|██▌ | 136/520 [08:50<23:23, 3.66s/it] {'loss': 1.2913, 'grad_norm': 0.0009039774000957782, 'learning_rate': 0.17330518718298263, 'epoch': 0.26}
+ 26%|██▌ | 136/520 [08:50<23:23, 3.66s/it] 26%|██▋ | 137/520 [08:54<23:20, 3.66s/it] {'loss': 1.2111, 'grad_norm': 0.0010169955332503544, 'learning_rate': 0.17287979247517285, 'epoch': 0.26}
+ 26%|██▋ | 137/520 [08:54<23:20, 3.66s/it] 27%|██▋ | 138/520 [08:58<23:16, 3.66s/it] {'loss': 1.2276, 'grad_norm': 0.0008418147516126628, 'learning_rate': 0.17245156608592727, 'epoch': 0.27}
+ 27%|██▋ | 138/520 [08:58<23:16, 3.66s/it] 27%|██▋ | 139/520 [09:01<23:13, 3.66s/it] {'loss': 1.1014, 'grad_norm': 0.0007924280609582133, 'learning_rate': 0.17202052465361267, 'epoch': 0.27}
+ 27%|██▋ | 139/520 [09:01<23:13, 3.66s/it] 27%|██▋ | 140/520 [09:05<23:24, 3.70s/it] {'loss': 1.2433, 'grad_norm': 0.0008076751060365839, 'learning_rate': 0.17158668492597184, 'epoch': 0.27}
+ 27%|██▋ | 140/520 [09:05<23:24, 3.70s/it] 27%|██▋ | 141/520 [09:09<23:33, 3.73s/it] {'loss': 1.3233, 'grad_norm': 0.0008459090902038244, 'learning_rate': 0.17115006375947303, 'epoch': 0.27}
+ 27%|██▋ | 141/520 [09:09<23:33, 3.73s/it] 27%|██▋ | 142/520 [09:13<23:46, 3.77s/it] {'loss': 1.2462, 'grad_norm': 0.0008250278296057879, 'learning_rate': 0.17071067811865476, 'epoch': 0.27}
+ 27%|██▋ | 142/520 [09:13<23:46, 3.77s/it] 28%|██▊ | 143/520 [09:17<23:48, 3.79s/it] {'loss': 1.2525, 'grad_norm': 0.0009456146466398315, 'learning_rate': 0.17026854507546693, 'epoch': 0.28}
+ 28%|██▊ | 143/520 [09:17<23:48, 3.79s/it] 28%|██▊ | 144/520 [09:20<23:50, 3.80s/it] {'loss': 1.2282, 'grad_norm': 0.0009775723709469976, 'learning_rate': 0.1698236818086073, 'epoch': 0.28}
+ 28%|██▊ | 144/520 [09:20<23:50, 3.80s/it] 28%|██▊ | 145/520 [09:24<23:52, 3.82s/it] {'loss': 1.155, 'grad_norm': 0.0008417061656488095, 'learning_rate': 0.16937610560285418, 'epoch': 0.28}
+ 28%|██▊ | 145/520 [09:24<23:52, 3.82s/it] 28%|██▊ | 146/520 [09:28<23:50, 3.83s/it] {'loss': 1.298, 'grad_norm': 0.000876039056514726, 'learning_rate': 0.1689258338483947, 'epoch': 0.28}
+ 28%|██▊ | 146/520 [09:28<23:50, 3.83s/it] 28%|██▊ | 147/520 [09:32<23:47, 3.83s/it] {'loss': 1.202, 'grad_norm': 0.0008952365507801239, 'learning_rate': 0.16847288404014937, 'epoch': 0.28}
+ 28%|██▊ | 147/520 [09:32<23:47, 3.83s/it] 28%|██▊ | 148/520 [09:36<23:43, 3.83s/it] {'loss': 1.2241, 'grad_norm': 0.000854057123835549, 'learning_rate': 0.16801727377709194, 'epoch': 0.28}
+ 28%|██▊ | 148/520 [09:36<23:43, 3.83s/it] 29%|██▊ | 149/520 [09:39<23:21, 3.78s/it] {'loss': 1.1743, 'grad_norm': 0.0009029841174975638, 'learning_rate': 0.16755902076156604, 'epoch': 0.29}
+ 29%|██▊ | 149/520 [09:39<23:21, 3.78s/it] 29%|██▉ | 150/520 [09:43<22:59, 3.73s/it] {'loss': 1.3834, 'grad_norm': 0.0008883177134046105, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 29%|██▉ | 150/520 [09:43<22:59, 3.73s/it] 29%|██▉ | 151/520 [09:47<22:47, 3.71s/it] {'loss': 1.2161, 'grad_norm': 0.0009156846033110292, 'learning_rate': 0.1666346577952004, 'epoch': 0.29}
+ 29%|██▉ | 151/520 [09:47<22:47, 3.71s/it] 29%|██▉ | 152/520 [09:50<22:35, 3.68s/it] {'loss': 1.1865, 'grad_norm': 0.0009321822431571972, 'learning_rate': 0.16616858375968596, 'epoch': 0.29}
+ 29%|██▉ | 152/520 [09:50<22:35, 3.68s/it] 29%|██▉ | 153/520 [09:54<22:24, 3.66s/it] {'loss': 1.2174, 'grad_norm': 0.0008648226329757996, 'learning_rate': 0.16569993880095807, 'epoch': 0.29}
+ 29%|██▉ | 153/520 [09:54<22:24, 3.66s/it] 30%|██▉ | 154/520 [09:58<22:16, 3.65s/it] {'loss': 1.3012, 'grad_norm': 0.000848630866017157, 'learning_rate': 0.16522874112781213, 'epoch': 0.3}
+ 30%|██▉ | 154/520 [09:58<22:16, 3.65s/it] 30%|██▉ | 155/520 [10:01<22:17, 3.66s/it] {'loss': 1.216, 'grad_norm': 0.0009017596394663571, 'learning_rate': 0.16475500904822704, 'epoch': 0.3}
+ 30%|██▉ | 155/520 [10:01<22:17, 3.66s/it] 30%|███ | 156/520 [10:05<22:14, 3.67s/it] {'loss': 1.2389, 'grad_norm': 0.00097508291850649, 'learning_rate': 0.16427876096865393, 'epoch': 0.3}
+ 30%|███ | 156/520 [10:05<22:14, 3.67s/it] 30%|███ | 157/520 [10:09<22:07, 3.66s/it] {'loss': 1.2926, 'grad_norm': 0.0008351547255799535, 'learning_rate': 0.16380001539330089, 'epoch': 0.3}
+ 30%|███ | 157/520 [10:09<22:07, 3.66s/it] 30%|███ | 158/520 [10:12<22:05, 3.66s/it] {'loss': 1.2188, 'grad_norm': 0.0008670127901695885, 'learning_rate': 0.163318790923414, 'epoch': 0.3}
+ 30%|███ | 158/520 [10:12<22:05, 3.66s/it] 31%|███ | 159/520 [10:16<22:00, 3.66s/it] {'loss': 1.2669, 'grad_norm': 0.0008742956609933359, 'learning_rate': 0.16283510625655473, 'epoch': 0.31}
+ 31%|███ | 159/520 [10:16<22:00, 3.66s/it] 31%|███ | 160/520 [10:20<21:58, 3.66s/it] {'loss': 1.2737, 'grad_norm': 0.0009125502422741313, 'learning_rate': 0.16234898018587338, 'epoch': 0.31}
+ 31%|███ | 160/520 [10:20<21:58, 3.66s/it] 31%|███ | 161/520 [10:23<21:55, 3.66s/it] {'loss': 1.2498, 'grad_norm': 0.0008808144182361863, 'learning_rate': 0.16186043159937882, 'epoch': 0.31}
+ 31%|███ | 161/520 [10:23<21:55, 3.66s/it] 31%|███ | 162/520 [10:27<21:56, 3.68s/it] {'loss': 1.2339, 'grad_norm': 0.0008299169075291071, 'learning_rate': 0.16136947947920477, 'epoch': 0.31}
+ 31%|███ | 162/520 [10:27<21:56, 3.68s/it] 31%|███▏ | 163/520 [10:31<21:49, 3.67s/it] {'loss': 1.1498, 'grad_norm': 0.0009832613371355508, 'learning_rate': 0.16087614290087207, 'epoch': 0.31}
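The learning_rate values logged above are consistent with linear warmup to the 0.2 peak over the first 16 steps, followed by cosine decay over the remaining 504 of 520 total steps. The sketch below is an inference from the logged numbers, not a schedule read out of the training config; it reproduces three logged values exactly:

```python
# Reproduction sketch (assumed schedule, inferred from the logged values):
# linear warmup to 0.2 over 16 steps, then cosine decay to 0 at step 520.
import math

PEAK_LR, WARMUP, TOTAL = 0.2, 16, 520

def lr_at(step: int) -> float:
    if step <= WARMUP:
        return PEAK_LR * step / WARMUP  # 0.0125 per step, matching steps 1-16
    progress = (step - WARMUP) / (TOTAL - WARMUP)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

assert abs(lr_at(1) - 0.0125) < 1e-9                 # logged at step 1
assert abs(lr_at(100) - 0.1866025403784439) < 1e-9   # logged at step 100
assert abs(lr_at(142) - 0.17071067811865476) < 1e-9  # logged at step 142
```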
+ 31%|███▏ | 163/520 [10:31<21:49, 3.67s/it] 32%|███▏ | 164/520 [10:34<21:44, 3.66s/it] {'loss': 1.1137, 'grad_norm': 0.0008295191432808926, 'learning_rate': 0.16038044103254775, 'epoch': 0.32}
+ 32%|███▏ | 164/520 [10:34<21:44, 3.66s/it] 32%|███▏ | 165/520 [10:38<21:39, 3.66s/it] {'loss': 1.265, 'grad_norm': 0.0008478393637384307, 'learning_rate': 0.15988239313430005, 'epoch': 0.32}
+ 32%|███▏ | 165/520 [10:38<21:39, 3.66s/it] 32%|███▏ | 166/520 [10:41<21:32, 3.65s/it] {'loss': 1.2276, 'grad_norm': 0.0009294207171520542, 'learning_rate': 0.15938201855735015, 'epoch': 0.32}
+ 32%|███▏ | 166/520 [10:41<21:32, 3.65s/it] 32%|███▏ | 167/520 [10:45<21:33, 3.66s/it] {'loss': 1.2205, 'grad_norm': 0.000846869562452288, 'learning_rate': 0.15887933674332047, 'epoch': 0.32}
+ 32%|███▏ | 167/520 [10:45<21:33, 3.66s/it] 32%|███▏ | 168/520 [10:49<21:29, 3.66s/it] {'loss': 1.1663, 'grad_norm': 0.0008145034392012127, 'learning_rate': 0.158374367223479, 'epoch': 0.32}
+ 32%|███▏ | 168/520 [10:49<21:29, 3.66s/it] 32%|███▎ | 169/520 [10:53<21:30, 3.68s/it] {'loss': 1.2421, 'grad_norm': 0.0008273740643187817, 'learning_rate': 0.1578671296179806, 'epoch': 0.33}
+ 32%|███▎ | 169/520 [10:53<21:30, 3.68s/it] 33%|███▎ | 170/520 [10:56<21:28, 3.68s/it] {'loss': 1.1887, 'grad_norm': 0.00072287271830973, 'learning_rate': 0.15735764363510463, 'epoch': 0.33}
+ 33%|███▎ | 170/520 [10:56<21:28, 3.68s/it] 33%|███▎ | 171/520 [11:00<21:18, 3.66s/it] {'loss': 1.1777, 'grad_norm': 0.0009093015048629491, 'learning_rate': 0.15684592907048925, 'epoch': 0.33}
+ 33%|███▎ | 171/520 [11:00<21:18, 3.66s/it] 33%|███▎ | 172/520 [11:04<21:20, 3.68s/it] {'loss': 1.2551, 'grad_norm': 0.0008384589950296387, 'learning_rate': 0.1563320058063622, 'epoch': 0.33}
+ 33%|███▎ | 172/520 [11:04<21:20, 3.68s/it] 33%|███▎ | 173/520 [11:07<21:20, 3.69s/it] {'loss': 1.1884, 'grad_norm': 0.0008370216643489635, 'learning_rate': 0.15581589381076844, 'epoch': 0.33}
+ 33%|███▎ | 173/520 [11:07<21:20, 3.69s/it] 33%|███▎ | 174/520 [11:11<21:15, 3.69s/it] {'loss': 1.2404, 'grad_norm': 0.0008566809231396028, 'learning_rate': 0.15529761313679394, 'epoch': 0.33}
+ 33%|███▎ | 174/520 [11:11<21:15, 3.69s/it] 34%|███▎ | 175/520 [11:15<21:09, 3.68s/it] {'loss': 1.1595, 'grad_norm': 0.0007956771105697457, 'learning_rate': 0.15477718392178716, 'epoch': 0.34}
+ 34%|███▎ | 175/520 [11:15<21:09, 3.68s/it] 34%|███▍ | 176/520 [11:18<21:03, 3.67s/it] {'loss': 1.2652, 'grad_norm': 0.000840849631299678, 'learning_rate': 0.15425462638657594, 'epoch': 0.34}
+ 34%|███▍ | 176/520 [11:18<21:03, 3.67s/it] 34%|███▍ | 177/520 [11:22<21:00, 3.68s/it] {'loss': 1.1442, 'grad_norm': 0.0008471348837031712, 'learning_rate': 0.1537299608346824, 'epoch': 0.34}
+ 34%|███▍ | 177/520 [11:22<21:00, 3.68s/it] 34%|███▍ | 178/520 [11:26<20:54, 3.67s/it] {'loss': 1.2269, 'grad_norm': 0.0009384338739488748, 'learning_rate': 0.15320320765153367, 'epoch': 0.34}
+ 34%|███▍ | 178/520 [11:26<20:54, 3.67s/it] 34%|███▍ | 179/520 [11:29<20:47, 3.66s/it] {'loss': 1.2991, 'grad_norm': 0.0007974505255090035, 'learning_rate': 0.15267438730367008, 'epoch': 0.34}
+ 34%|███▍ | 179/520 [11:29<20:47, 3.66s/it] 35%|███▍ | 180/520 [11:33<20:44, 3.66s/it] {'loss': 1.2186, 'grad_norm': 0.0008686569602165417, 'learning_rate': 0.1521435203379498, 'epoch': 0.35}
+ 35%|███▍ | 180/520 [11:33<20:44, 3.66s/it] 35%|███▍ | 181/520 [11:37<20:39, 3.66s/it] {'loss': 1.1988, 'grad_norm': 0.0007747055412722102, 'learning_rate': 0.15161062738075068, 'epoch': 0.35}
+ 35%|███▍ | 181/520 [11:37<20:39, 3.66s/it] 35%|███▌ | 182/520 [11:40<20:34, 3.65s/it] {'loss': 1.2099, 'grad_norm': 0.0008757927569265525, 'learning_rate': 0.1510757291371686, 'epoch': 0.35}
+ 35%|███▌ | 182/520 [11:40<20:34, 3.65s/it] 35%|███▌ | 183/520 [11:44<20:29, 3.65s/it] {'loss': 1.2301, 'grad_norm': 0.000829015507117091, 'learning_rate': 0.1505388463902131, 'epoch': 0.35}
+ 35%|███▌ | 183/520 [11:44<20:29, 3.65s/it] 35%|███▌ | 184/520 [11:48<20:30, 3.66s/it] {'loss': 1.1652, 'grad_norm': 0.0009260859832867215, 'learning_rate': 0.15000000000000002, 'epoch': 0.35}
+ 35%|███▌ | 184/520 [11:48<20:30, 3.66s/it] 36%|███▌ | 185/520 [11:51<20:26, 3.66s/it] {'loss': 1.2987, 'grad_norm': 0.0008322214200725353, 'learning_rate': 0.14945921090294076, 'epoch': 0.36}
+ 36%|███▌ | 185/520 [11:51<20:26, 3.66s/it] 36%|███▌ | 186/520 [11:55<20:21, 3.66s/it] {'loss': 1.1852, 'grad_norm': 0.0008675787793447239, 'learning_rate': 0.14891650011092894, 'epoch': 0.36}
+ 36%|███▌ | 186/520 [11:55<20:21, 3.66s/it] 36%|███▌ | 187/520 [11:58<20:14, 3.65s/it] {'loss': 1.189, 'grad_norm': 0.0009770510617664598, 'learning_rate': 0.14837188871052398, 'epoch': 0.36}
+ 36%|███▌ | 187/520 [11:58<20:14, 3.65s/it] 36%|███▌ | 188/520 [12:02<20:11, 3.65s/it] {'loss': 1.2767, 'grad_norm': 0.0009055753650709, 'learning_rate': 0.14782539786213184, 'epoch': 0.36}
+ 36%|███▌ | 188/520 [12:02<20:11, 3.65s/it] 36%|███▋ | 189/520 [12:06<20:11, 3.66s/it] {'loss': 1.2774, 'grad_norm': 0.000790254043271807, 'learning_rate': 0.1472770487991827, 'epoch': 0.36}
+ 36%|███▋ | 189/520 [12:06<20:11, 3.66s/it] 37%|███▋ | 190/520 [12:09<20:07, 3.66s/it] {'loss': 1.2009, 'grad_norm': 0.0008934139916963991, 'learning_rate': 0.1467268628273062, 'epoch': 0.37}
+ 37%|███▋ | 190/520 [12:09<20:07, 3.66s/it] 37%|███▋ | 191/520 [12:13<20:05, 3.66s/it] {'loss': 1.1633, 'grad_norm': 0.0008195508693768104, 'learning_rate': 0.1461748613235034, 'epoch': 0.37}
+ 37%|███▋ | 191/520 [12:13<20:05, 3.66s/it] 37%|███▋ | 192/520 [12:17<20:03, 3.67s/it] {'loss': 1.237, 'grad_norm': 0.0008020598406126793, 'learning_rate': 0.1456210657353163, 'epoch': 0.37}
+ 37%|███▋ | 192/520 [12:17<20:03, 3.67s/it] 37%|███▋ | 193/520 [12:20<19:59, 3.67s/it] {'loss': 1.1943, 'grad_norm': 0.000906967785297533, 'learning_rate': 0.14506549757999454, 'epoch': 0.37}
+ 37%|███▋ | 193/520 [12:20<19:59, 3.67s/it] 37%|███▋ | 194/520 [12:24<19:55, 3.67s/it] {'loss': 1.0931, 'grad_norm': 0.0007355925200412105, 'learning_rate': 0.14450817844365924, 'epoch': 0.37}
+ 37%|███▋ | 194/520 [12:24<19:55, 3.67s/it] 38%|███▊ | 195/520 [12:28<19:51, 3.67s/it] {'loss': 1.2599, 'grad_norm': 0.0008379688128767544, 'learning_rate': 0.1439491299804645, 'epoch': 0.38}
+ 38%|███▊ | 195/520 [12:28<19:51, 3.67s/it] 38%|███▊ | 196/520 [12:31<19:46, 3.66s/it] {'loss': 1.2363, 'grad_norm': 0.0009282456968062933, 'learning_rate': 0.14338837391175582, 'epoch': 0.38}
+ 38%|███▊ | 196/520 [12:31<19:46, 3.66s/it] 38%|███▊ | 197/520 [12:35<19:46, 3.67s/it] {'loss': 1.1857, 'grad_norm': 0.0008455052612068559, 'learning_rate': 0.14282593202522628, 'epoch': 0.38}
+ 38%|███▊ | 197/520 [12:35<19:46, 3.67s/it] 38%|███▊ | 198/520 [12:39<19:40, 3.66s/it] {'loss': 1.2533, 'grad_norm': 0.0009117865895942536, 'learning_rate': 0.14226182617406996, 'epoch': 0.38}
+ 38%|███▊ | 198/520 [12:39<19:40, 3.66s/it] 38%|███▊ | 199/520 [12:42<19:39, 3.67s/it] {'loss': 1.1708, 'grad_norm': 0.0008493591471977064, 'learning_rate': 0.14169607827613281, 'epoch': 0.38}
+ 38%|███▊ | 199/520 [12:42<19:39, 3.67s/it] 38%|███▊ | 200/520 [12:46<19:35, 3.67s/it] {'loss': 1.147, 'grad_norm': 0.0008325271205426402, 'learning_rate': 0.14112871031306118, 'epoch': 0.38}
+ 38%|███▊ | 200/520 [12:46<19:35, 3.67s/it] 39%|███▊ | 201/520 [12:50<19:34, 3.68s/it] {'loss': 1.1712, 'grad_norm': 0.0007289298592590362, 'learning_rate': 0.1405597443294475, 'epoch': 0.39}
+ 39%|███▊ | 201/520 [12:50<19:34, 3.68s/it] 39%|███▉ | 202/520 [12:54<19:53, 3.75s/it] {'loss': 1.1822, 'grad_norm': 0.0008520899035995657, 'learning_rate': 0.13998920243197407, 'epoch': 0.39}
+ 39%|███▉ | 202/520 [12:54<19:53, 3.75s/it] 39%|███▉ | 203/520 [12:58<20:00, 3.79s/it] {'loss': 1.2248, 'grad_norm': 0.0008768770731704351, 'learning_rate': 0.13941710678855396, 'epoch': 0.39}
+ 39%|███▉ | 203/520 [12:58<20:00, 3.79s/it] 39%|███▉ | 204/520 [13:02<20:14, 3.84s/it] {'loss': 1.2415, 'grad_norm': 0.0008685620537500674, 'learning_rate': 0.13884347962746948, 'epoch': 0.39}
+ 39%|███▉ | 204/520 [13:02<20:14, 3.84s/it] 39%|███▉ | 205/520 [13:06<20:17, 3.86s/it] {'loss': 1.1688, 'grad_norm': 0.0007981093797432493, 'learning_rate': 0.138268343236509, 'epoch': 0.39}
+ 39%|███▉ | 205/520 [13:06<20:17, 3.86s/it] 40%|███▉ | 206/520 [13:09<20:14, 3.87s/it] {'loss': 1.2704, 'grad_norm': 0.0008182461427671642, 'learning_rate': 0.13769171996210053, 'epoch': 0.4}
+ 40%|███▉ | 206/520 [13:09<20:14, 3.87s/it] 40%|███▉ | 207/520 [13:13<20:05, 3.85s/it] {'loss': 1.1358, 'grad_norm': 0.0007248105682494105, 'learning_rate': 0.1371136322084438, 'epoch': 0.4}
+ 40%|███▉ | 207/520 [13:13<20:05, 3.85s/it] 40%|████ | 208/520 [13:17<19:47, 3.81s/it] {'loss': 1.2626, 'grad_norm': 0.0009362942956438421, 'learning_rate': 0.13653410243663952, 'epoch': 0.4}
+ 40%|████ | 208/520 [13:17<19:47, 3.81s/it] 40%|████ | 209/520 [13:21<19:28, 3.76s/it] {'loss': 1.1812, 'grad_norm': 0.0007862563461922883, 'learning_rate': 0.13595315316381676, 'epoch': 0.4}
+ 40%|████ | 209/520 [13:21<19:28, 3.76s/it] 40%|████ | 210/520 [13:24<19:12, 3.72s/it] {'loss': 1.2458, 'grad_norm': 0.0008368658087227738, 'learning_rate': 0.13537080696225814, 'epoch': 0.4}
+ 40%|████ | 210/520 [13:24<19:12, 3.72s/it] 41%|████ | 211/520 [13:28<19:06, 3.71s/it] {'loss': 1.2481, 'grad_norm': 0.0007728217560630618, 'learning_rate': 0.13478708645852272, 'epoch': 0.41}
+ 41%|████ | 211/520 [13:28<19:06, 3.71s/it] 41%|████ | 212/520 [13:32<18:57, 3.69s/it] {'loss': 1.2487, 'grad_norm': 0.0008122852455758685, 'learning_rate': 0.1342020143325669, 'epoch': 0.41}
+ 41%|████ | 212/520 [13:32<18:57, 3.69s/it] 41%|████ | 213/520 [13:35<18:51, 3.69s/it] {'loss': 1.1967, 'grad_norm': 0.0009346171171963079, 'learning_rate': 0.13361561331686309, 'epoch': 0.41}
+ 41%|████ | 213/520 [13:35<18:51, 3.69s/it] 41%|████ | 214/520 [13:39<18:53, 3.70s/it] {'loss': 1.1902, 'grad_norm': 0.0008529884137049932, 'learning_rate': 0.13302790619551672, 'epoch': 0.41}
+ 41%|████ | 214/520 [13:39<18:53, 3.70s/it] 41%|████▏ | 215/520 [13:43<19:11, 3.78s/it] {'loss': 1.1076, 'grad_norm': 0.0008007755250241167, 'learning_rate': 0.1324389158033807, 'epoch': 0.41}
+ 41%|████▏ | 215/520 [13:43<19:11, 3.78s/it] 42%|████▏ | 216/520 [13:47<19:28, 3.84s/it] {'loss': 1.1138, 'grad_norm': 0.000829994365996546, 'learning_rate': 0.13184866502516845, 'epoch': 0.42}
+ 42%|████▏ | 216/520 [13:47<19:28, 3.84s/it] 42%|████▏ | 217/520 [13:51<19:33, 3.87s/it] {'loss': 1.2438, 'grad_norm': 0.0008734901024384049, 'learning_rate': 0.13125717679456447, 'epoch': 0.42}
+ 42%|████▏ | 217/520 [13:51<19:33, 3.87s/it] 42%|████▏ | 218/520 [13:55<19:35, 3.89s/it] {'loss': 1.2142, 'grad_norm': 0.0009174992905815275, 'learning_rate': 0.13066447409333345, 'epoch': 0.42}
+ 42%|████▏ | 218/520 [13:55<19:35, 3.89s/it] 42%|████▏ | 219/520 [13:59<19:32, 3.90s/it] {'loss': 1.2329, 'grad_norm': 0.0007740809090275804, 'learning_rate': 0.1300705799504273, 'epoch': 0.42}
+ 42%|████▏ | 219/520 [13:59<19:32, 3.90s/it] 42%|████▏ | 220/520 [14:03<19:29, 3.90s/it] {'loss': 1.1536, 'grad_norm': 0.0007715677136466766, 'learning_rate': 0.12947551744109043, 'epoch': 0.42}
+ 42%|████▏ | 220/520 [14:03<19:29, 3.90s/it] 42%|████▎ | 221/520 [14:07<19:28, 3.91s/it] {'loss': 1.2233, 'grad_norm': 0.0008203128191087808, 'learning_rate': 0.128879309685963, 'epoch': 0.42}
+ 42%|████▎ | 221/520 [14:07<19:28, 3.91s/it] 43%|████▎ | 222/520 [14:10<19:01, 3.83s/it] {'loss': 1.1708, 'grad_norm': 0.000827967932240071, 'learning_rate': 0.12828197985018275, 'epoch': 0.43}
+ 43%|████▎ | 222/520 [14:10<19:01, 3.83s/it] 43%|████▎ | 223/520 [14:14<18:56, 3.83s/it] {'loss': 1.1634, 'grad_norm': 0.0007960434147011932, 'learning_rate': 0.12768355114248495, 'epoch': 0.43}
+ 43%|████▎ | 223/520 [14:14<18:56, 3.83s/it] 43%|████▎ | 224/520 [14:18<18:52, 3.82s/it] {'loss': 1.2093, 'grad_norm': 0.0007594144818245321, 'learning_rate': 0.12708404681430052, 'epoch': 0.43}
+ 43%|████▎ | 224/520 [14:18<18:52, 3.82s/it] 43%|████▎ | 225/520 [14:22<18:46, 3.82s/it] {'loss': 1.1622, 'grad_norm': 0.0008220177204798004, 'learning_rate': 0.1264834901588527, 'epoch': 0.43}
+ 43%|████▎ | 225/520 [14:22<18:46, 3.82s/it] 43%|████▎ | 226/520 [14:25<18:27, 3.77s/it] {'loss': 1.2642, 'grad_norm': 0.0008120489554310319, 'learning_rate': 0.12588190451025208, 'epoch': 0.43}
+ 43%|████▎ | 226/520 [14:25<18:27, 3.77s/it] 44%|████▎ | 227/520 [14:29<18:10, 3.72s/it] {'loss': 1.2515, 'grad_norm': 0.0008008786104912802, 'learning_rate': 0.12527931324258976, 'epoch': 0.44}
+ 44%|████▎ | 227/520 [14:29<18:10, 3.72s/it] 44%|████▍ | 228/520 [14:33<18:02, 3.71s/it] {'loss': 1.2461, 'grad_norm': 0.0008221627025343902, 'learning_rate': 0.12467573976902935, 'epoch': 0.44}
+ 44%|████▍ | 228/520 [14:33<18:02, 3.71s/it] 44%|████▍ | 229/520 [14:36<17:51, 3.68s/it] {'loss': 1.226, 'grad_norm': 0.0007765309837643599, 'learning_rate': 0.12407120754089732, 'epoch': 0.44}
+ 44%|████▍ | 229/520 [14:36<17:51, 3.68s/it] 44%|████▍ | 230/520 [14:40<17:45, 3.67s/it] {'loss': 1.1183, 'grad_norm': 0.0008045709083637701, 'learning_rate': 0.12346574004677154, 'epoch': 0.44}
+ 44%|████▍ | 230/520 [14:40<17:45, 3.67s/it] 44%|████▍ | 231/520 [14:44<17:41, 3.67s/it] {'loss': 1.1848, 'grad_norm': 0.0007617572839394342, 'learning_rate': 0.12285936081156897, 'epoch': 0.44}
+ 44%|████▍ | 231/520 [14:44<17:41, 3.67s/it] 45%|████▍ | 232/520 [14:47<17:35, 3.66s/it] {'loss': 1.289, 'grad_norm': 0.0008393335035686828, 'learning_rate': 0.12225209339563144, 'epoch': 0.45}
+ 45%|████▍ | 232/520 [14:47<17:35, 3.66s/it] 45%|████▍ | 233/520 [14:51<17:30, 3.66s/it] {'loss': 1.1765, 'grad_norm': 0.0008707989478183595, 'learning_rate': 0.12164396139381028, 'epoch': 0.45}
+ 45%|████▍ | 233/520 [14:51<17:30, 3.66s/it] 45%|████▌ | 234/520 [14:54<17:27, 3.66s/it] {'loss': 1.1388, 'grad_norm': 0.0008594035634187121, 'learning_rate': 0.12103498843454959, 'epoch': 0.45}
+ 45%|████▌ | 234/520 [14:54<17:27, 3.66s/it] 45%|████▌ | 235/520 [14:58<17:21, 3.66s/it] {'loss': 1.185, 'grad_norm': 0.0008216726093722208, 'learning_rate': 0.12042519817896805, 'epoch': 0.45}
+ 45%|████▌ | 235/520 [14:58<17:21, 3.66s/it] 45%|████▌ | 236/520 [15:02<17:19, 3.66s/it] {'loss': 1.2565, 'grad_norm': 0.0007714030114897676, 'learning_rate': 0.11981461431993977, 'epoch': 0.45}
+ 45%|████▌ | 236/520 [15:02<17:19, 3.66s/it] 46%|████▌ | 237/520 [15:05<17:14, 3.66s/it] {'loss': 1.2601, 'grad_norm': 0.0008076049439883066, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 46%|████▌ | 237/520 [15:05<17:14, 3.66s/it] 46%|████▌ | 238/520 [15:09<17:09, 3.65s/it] {'loss': 1.1918, 'grad_norm': 0.0008367664904958761, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 46%|████▌ | 238/520 [15:09<17:09, 3.65s/it] 46%|████▌ | 239/520 [15:13<17:08, 3.66s/it] {'loss': 1.26, 'grad_norm': 0.0008587362781383736, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 46%|████▌ | 239/520 [15:13<17:08, 3.66s/it] 46%|████▌ | 240/520 [15:16<17:05, 3.66s/it] {'loss': 1.0842, 'grad_norm': 0.0007507170764405721, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 46%|████▌ | 240/520 [15:16<17:05, 3.66s/it] 46%|████▋ | 241/520 [15:20<17:01, 3.66s/it] {'loss': 1.164, 'grad_norm': 0.000821097787679649, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 46%|████▋ | 241/520 [15:20<17:01, 3.66s/it] 47%|████▋ | 242/520 [15:24<16:57, 3.66s/it] {'loss': 1.1728, 'grad_norm': 0.0007785666040486662, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 47%|████▋ | 242/520 [15:24<16:57, 3.66s/it] 47%|████▋ | 243/520 [15:27<17:00, 3.69s/it] {'loss': 1.1734, 'grad_norm': 0.0008354064703816929, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 47%|████▋ | 243/520 [15:27<17:00, 3.69s/it] 47%|████▋ | 244/520 [15:31<17:09, 3.73s/it] {'loss': 1.2709, 'grad_norm': 0.0008222794484107682, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 47%|████▋ | 244/520 [15:31<17:09, 3.73s/it] 47%|████▋ | 245/520 [15:35<17:13, 3.76s/it] {'loss': 1.1553, 'grad_norm': 0.000857485362424714, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 47%|████▋ | 245/520 [15:35<17:13, 3.76s/it] 47%|████▋ | 246/520 [15:39<17:03, 3.74s/it] {'loss': 1.2763, 'grad_norm': 0.0008233936431460567, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 47%|████▋ | 246/520 [15:39<17:03, 3.74s/it] 48%|████▊ | 247/520 [15:42<16:52, 3.71s/it] {'loss': 1.3272, 'grad_norm': 0.0008651403153723014, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 48%|████▊ | 247/520 [15:42<16:52, 3.71s/it] 48%|████▊ | 248/520 [15:46<16:43, 3.69s/it] {'loss': 1.1495, 'grad_norm': 0.0008150040310336985, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 48%|████▊ | 248/520 [15:46<16:43, 3.69s/it] 48%|████▊ | 249/520 [15:50<16:35, 3.68s/it] {'loss': 1.2368, 'grad_norm': 0.0008262750370412674, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 48%|████▊ | 249/520 [15:50<16:35, 3.68s/it] 48%|████▊ | 250/520 [15:53<16:32, 3.68s/it] {'loss': 1.1846, 'grad_norm': 0.0008767445686339375, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 48%|████▊ | 250/520 [15:53<16:32, 3.68s/it] 48%|████▊ | 251/520 [15:57<16:28, 3.68s/it] {'loss': 1.2494, 'grad_norm': 0.0007670709895556922, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 48%|████▊ | 251/520 [15:57<16:28, 3.68s/it] 48%|████▊ | 252/520 [16:01<16:25, 3.68s/it] {'loss': 1.179, 'grad_norm': 0.000762274020356144, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 48%|████▊ | 252/520 [16:01<16:25, 3.68s/it] 49%|████▊ | 253/520 [16:05<16:35, 3.73s/it] {'loss': 1.2339, 'grad_norm': 0.0008867943409998164, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 49%|████▊ | 253/520 [16:05<16:35, 3.73s/it] 49%|████▉ | 254/520 [16:08<16:32, 3.73s/it] {'loss': 1.1778, 'grad_norm': 0.0007803503082919255, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 49%|████▉ | 254/520 [16:08<16:32, 3.73s/it] 49%|████▉ | 255/520 [16:12<16:23, 3.71s/it] {'loss': 1.1828, 'grad_norm': 0.0008653704420256749, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 49%|████▉ | 255/520 [16:12<16:23, 3.71s/it] 49%|████▉ | 256/520 [16:16<16:13, 3.69s/it] {'loss': 1.2383, 'grad_norm': 0.0008651629156652792, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 49%|████▉ | 256/520 [16:16<16:13, 3.69s/it] 49%|████▉ | 257/520 [16:19<16:06, 3.67s/it] {'loss': 1.2161, 'grad_norm': 0.0008400872523808829, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 49%|████▉ | 257/520 [16:19<16:06, 3.67s/it] 50%|████▉ | 258/520 [16:23<16:01, 3.67s/it] {'loss': 1.2187, 'grad_norm': 0.0007170411318404652, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 50%|████▉ | 258/520 [16:23<16:01, 3.67s/it] 50%|████▉ | 259/520 [16:27<16:09, 3.71s/it] {'loss': 1.2944, 'grad_norm': 0.0009100684696044033, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 50%|████▉ | 259/520 [16:27<16:09, 3.71s/it] 50%|█████ | 260/520 [16:31<16:13, 3.75s/it] {'loss': 1.2243, 'grad_norm': 0.0006779090267961328, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 50%|█████ | 260/520 [16:31<16:13, 3.75s/it] 50%|█████ | 261/520 [16:34<16:19, 3.78s/it] {'loss': 1.1699, 'grad_norm': 0.0007915692985669951, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 50%|█████ | 261/520 [16:34<16:19, 3.78s/it] 50%|█████ | 262/520 [16:38<16:18, 3.79s/it] {'loss': 1.1602, 'grad_norm': 0.0008191493574532726, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 50%|█████ | 262/520 [16:38<16:18, 3.79s/it] 51%|█████ | 263/520 [16:42<16:17, 3.80s/it] {'loss': 1.1986, 'grad_norm': 0.000817163109144531, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 51%|█████ | 263/520 [16:42<16:17, 3.80s/it] 51%|█████ | 264/520 [16:46<16:18, 3.82s/it] {'loss': 1.2494, 'grad_norm': 0.0007845695537680922, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 51%|█████ | 264/520 [16:46<16:18, 3.82s/it] 51%|█████ | 265/520 [16:50<16:15, 3.83s/it] {'loss': 1.1698, 'grad_norm': 0.0008926030176193274, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 51%|█████ | 265/520 [16:50<16:15, 3.83s/it] 51%|█████ | 266/520 [16:54<16:14, 3.84s/it] {'loss': 1.0553, 'grad_norm': 0.0007262989549535476, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 51%|█████ | 266/520 [16:54<16:14, 3.84s/it] 51%|█████▏ | 267/520 [16:58<16:12, 3.84s/it] {'loss': 1.167, 'grad_norm': 0.0007934223136475025, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 51%|█████▏ | 267/520 [16:58<16:12, 3.84s/it] 52%|█████▏ | 268/520 [17:01<16:07, 3.84s/it] {'loss': 1.2901, 'grad_norm': 0.0007978611493165481, 'learning_rate': 0.1, 'epoch': 0.52}
+ 52%|█████▏ | 268/520 [17:01<16:07, 3.84s/it] 52%|█████▏ | 269/520 [17:05<16:02, 3.83s/it] {'loss': 1.2719, 'grad_norm': 0.0008538216602712786, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 52%|█████▏ | 269/520 [17:05<16:02, 3.83s/it] 52%|█████▏ | 270/520 [17:09<16:01, 3.84s/it] {'loss': 1.1335, 'grad_norm': 0.0007719387463561027, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 52%|█████▏ | 270/520 [17:09<16:01, 3.84s/it] 52%|█████▏ | 271/520 [17:13<15:55, 3.84s/it] {'loss': 1.2479, 'grad_norm': 0.0008369711148888091, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 52%|█████▏ | 271/520 [17:13<15:55, 3.84s/it] 52%|█████▏ | 272/520 [17:17<15:51, 3.84s/it] {'loss': 1.1463, 'grad_norm': 0.0007686085394439831, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 52%|█████▏ | 272/520 [17:17<15:51, 3.84s/it] 52%|█████▎ | 273/520 [17:21<15:45, 3.83s/it] {'loss': 1.2536, 'grad_norm': 0.0007815079892308704, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
+ 52%|█████▎ | 273/520 [17:21<15:45, 3.83s/it] 53%|█████▎ | 274/520 [17:24<15:40, 3.83s/it] {'loss': 1.2344, 'grad_norm': 0.0008596103667607623, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 53%|█████▎ | 274/520 [17:24<15:40, 3.83s/it] 53%|█████▎ | 275/520 [17:28<15:37, 3.83s/it] {'loss': 1.1794, 'grad_norm': 0.0008062764388500925, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 53%|█████▎ | 275/520 [17:28<15:37, 3.83s/it] 53%|█████▎ | 276/520 [17:32<15:35, 3.84s/it] {'loss': 1.2348, 'grad_norm': 0.0008694733705002787, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 53%|█████▎ | 276/520 [17:32<15:35, 3.84s/it] 53%|█████▎ | 277/520 [17:36<15:32, 3.84s/it] {'loss': 1.2645, 'grad_norm': 0.0007364018424354168, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 53%|█████▎ | 277/520 [17:36<15:32, 3.84s/it] 53%|█████▎ | 278/520 [17:40<15:30, 3.85s/it] {'loss': 1.1372, 'grad_norm': 0.0007513549773080265, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 53%|█████▎ | 278/520 [17:40<15:30, 3.85s/it] 54%|█████▎ | 279/520 [17:44<15:24, 3.84s/it] {'loss': 1.1405, 'grad_norm': 0.000829508281779863, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 54%|█████▎ | 279/520 [17:44<15:24, 3.84s/it] 54%|█████▍ | 280/520 [17:47<15:22, 3.84s/it] {'loss': 1.1642, 'grad_norm': 0.0008841158620200055, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 54%|█████▍ | 280/520 [17:47<15:22, 3.84s/it] 54%|█████▍ | 281/520 [17:51<15:16, 3.83s/it] {'loss': 1.2674, 'grad_norm': 0.0008425839459287382, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 54%|█████▍ | 281/520 [17:51<15:16, 3.83s/it] 54%|█████▍ | 282/520 [17:55<15:11, 3.83s/it] {'loss': 1.1481, 'grad_norm': 0.0007742852843055203, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 54%|█████▍ | 282/520 [17:55<15:11, 3.83s/it] 54%|█████▍ | 283/520 [17:59<15:09, 3.84s/it] {'loss': 1.281, 'grad_norm': 0.000889945930726661, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
+ 54%|█████▍ | 283/520 [17:59<15:09, 3.84s/it] 55%|█████▍ | 284/520 [18:03<15:07, 3.85s/it] {'loss': 1.1472, 'grad_norm': 0.0008426905951695069, 'learning_rate': 0.09004321534041836, 'epoch': 0.55}
+ 55%|█████▍ | 284/520 [18:03<15:07, 3.85s/it] 55%|█████▍ | 285/520 [18:07<15:05, 3.85s/it] {'loss': 1.1705, 'grad_norm': 0.0008215941755854238, 'learning_rate': 0.08942317838840624, 'epoch': 0.55}
+ 55%|█████▍ | 285/520 [18:07<15:05, 3.85s/it] 55%|█████▌ | 286/520 [18:10<14:59, 3.84s/it] {'loss': 1.0531, 'grad_norm': 0.0008045881198827675, 'learning_rate': 0.08880355238966922, 'epoch': 0.55}
+ 55%|█████▌ | 286/520 [18:10<14:59, 3.84s/it] 55%|█████▌ | 287/520 [18:14<14:54, 3.84s/it] {'loss': 1.281, 'grad_norm': 0.0007956895828758924, 'learning_rate': 0.08818436141924073, 'epoch': 0.55}
+ 55%|█████▌ | 287/520 [18:14<14:54, 3.84s/it] 55%|█████▌ | 288/520 [18:18<14:49, 3.83s/it] {'loss': 1.3049, 'grad_norm': 0.0007831074534031206, 'learning_rate': 0.08756562953525152, 'epoch': 0.55}
+ 55%|█████▌ | 288/520 [18:18<14:49, 3.83s/it] 56%|█████▌ | 289/520 [18:22<14:36, 3.80s/it] {'loss': 1.1879, 'grad_norm': 0.000779133884198346, 'learning_rate': 0.08694738077799487, 'epoch': 0.56}
+ 56%|█████▌ | 289/520 [18:22<14:36, 3.80s/it] 56%|█████▌ | 290/520 [18:25<14:24, 3.76s/it] {'loss': 1.1137, 'grad_norm': 0.0007459049467059695, 'learning_rate': 0.08632963916899268, 'epoch': 0.56}
+ 56%|█████▌ | 290/520 [18:25<14:24, 3.76s/it] 56%|█████▌ | 291/520 [18:29<14:14, 3.73s/it] {'loss': 1.1517, 'grad_norm': 0.00080178993600821, 'learning_rate': 0.08571242871006202, 'epoch': 0.56}
+ 56%|█████▌ | 291/520 [18:29<14:14, 3.73s/it] 56%|█████▌ | 292/520 [18:33<14:09, 3.73s/it] {'loss': 1.2069, 'grad_norm': 0.000799654094356229, 'learning_rate': 0.08509577338238256, 'epoch': 0.56}
+ 56%|█████▌ | 292/520 [18:33<14:09, 3.73s/it] 56%|█████▋ | 293/520 [18:37<14:02, 3.71s/it] {'loss': 1.1591, 'grad_norm': 0.0008341801506892795, 'learning_rate': 0.08447969714556484, 'epoch': 0.56}
+ 56%|█████▋ | 293/520 [18:37<14:02, 3.71s/it] 57%|█████▋ | 294/520 [18:40<13:58, 3.71s/it] {'loss': 1.1795, 'grad_norm': 0.0008827026968397397, 'learning_rate': 0.08386422393671933, 'epoch': 0.57}
+ 57%|█████▋ | 294/520 [18:40<13:58, 3.71s/it] 57%|█████▋ | 295/520 [18:44<13:52, 3.70s/it] {'loss': 1.1828, 'grad_norm': 0.0007396429324885952, 'learning_rate': 0.08324937766952638, 'epoch': 0.57}
+ 57%|█████▋ | 295/520 [18:44<13:52, 3.70s/it] 57%|█████▋ | 296/520 [18:48<13:45, 3.69s/it] {'loss': 1.1277, 'grad_norm': 0.0008179725611122918, 'learning_rate': 0.08263518223330697, 'epoch': 0.57}
+ 57%|█████▋ | 296/520 [18:48<13:45, 3.69s/it] 57%|█████▋ | 297/520 [18:51<13:38, 3.67s/it] {'loss': 1.2577, 'grad_norm': 0.0008877773157086015, 'learning_rate': 0.08202166149209474, 'epoch': 0.57}
+ 57%|█████▋ | 297/520 [18:51<13:38, 3.67s/it] 57%|█████▋ | 298/520 [18:55<13:31, 3.66s/it] {'loss': 1.2199, 'grad_norm': 0.0007628868196069245, 'learning_rate': 0.08140883928370855, 'epoch': 0.57}
+ 57%|█████▋ | 298/520 [18:55<13:31, 3.66s/it] 57%|█████▊ | 299/520 [18:58<13:27, 3.65s/it] {'loss': 1.2274, 'grad_norm': 0.0007625167055328331, 'learning_rate': 0.0807967394188264, 'epoch': 0.57}
+ 57%|█████▊ | 299/520 [18:59<13:27, 3.65s/it] 58%|█████▊ | 300/520 [19:02<13:23, 3.65s/it] {'loss': 1.2645, 'grad_norm': 0.0008024327960982423, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 58%|█████▊ | 300/520 [19:02<13:23, 3.65s/it] 58%|█████▊ | 301/520 [19:06<13:16, 3.64s/it] {'loss': 1.2561, 'grad_norm': 0.0008235980126027893, 'learning_rate': 0.07957480182103199, 'epoch': 0.58}
+ 58%|█████▊ | 301/520 [19:06<13:16, 3.64s/it] 58%|█████▊ | 302/520 [19:09<13:13, 3.64s/it] {'loss': 1.231, 'grad_norm': 0.0007867022222808647, 'learning_rate': 0.07896501156545044, 'epoch': 0.58}
+ 58%|█████▊ | 302/520 [19:09<13:13, 3.64s/it] 58%|█████▊ | 303/520 [19:13<13:13, 3.65s/it] {'loss': 1.1763, 'grad_norm': 0.0009040238133255459, 'learning_rate': 0.07835603860618973, 'epoch': 0.58}
+ 58%|█████▊ | 303/520 [19:13<13:13, 3.65s/it] 58%|█████▊ | 304/520 [19:17<13:11, 3.66s/it] {'loss': 1.1441, 'grad_norm': 0.000824308578037298, 'learning_rate': 0.07774790660436857, 'epoch': 0.58}
+ 58%|█████▊ | 304/520 [19:17<13:11, 3.66s/it] 59%|█████▊ | 305/520 [19:20<13:06, 3.66s/it] {'loss': 1.282, 'grad_norm': 0.0009432329564956743, 'learning_rate': 0.07714063918843106, 'epoch': 0.59}
+ 59%|█████▊ | 305/520 [19:20<13:06, 3.66s/it] 59%|█████▉ | 306/520 [19:24<13:05, 3.67s/it] {'loss': 1.226, 'grad_norm': 0.0008317415460981195, 'learning_rate': 0.0765342599532285, 'epoch': 0.59}
+ 59%|█████▉ | 306/520 [19:24<13:05, 3.67s/it] 59%|█████▉ | 307/520 [19:28<12:59, 3.66s/it] {'loss': 1.1698, 'grad_norm': 0.0007757653488373562, 'learning_rate': 0.07592879245910272, 'epoch': 0.59}
+ 59%|█████▉ | 307/520 [19:28<12:59, 3.66s/it] 59%|█████▉ | 308/520 [19:31<12:56, 3.66s/it] {'loss': 1.279, 'grad_norm': 0.0007654539221768389, 'learning_rate': 0.07532426023097064, 'epoch': 0.59}
+ 59%|█████▉ | 308/520 [19:31<12:56, 3.66s/it] 59%|█████▉ | 309/520 [19:35<13:18, 3.78s/it] {'loss': 1.1712, 'grad_norm': 0.0007849062641714721, 'learning_rate': 0.07472068675741024, 'epoch': 0.59}
+ 59%|█████▉ | 309/520 [19:35<13:18, 3.78s/it] 60%|█████▉ | 310/520 [19:39<13:05, 3.74s/it] {'loss': 1.1482, 'grad_norm': 0.000817773975195827, 'learning_rate': 0.07411809548974792, 'epoch': 0.6}
+ 60%|█████▉ | 310/520 [19:39<13:05, 3.74s/it] 60%|█████▉ | 311/520 [19:43<12:56, 3.72s/it] {'loss': 1.1336, 'grad_norm': 0.0007942215576476527, 'learning_rate': 0.07351650984114727, 'epoch': 0.6}
+ 60%|█████▉ | 311/520 [19:43<12:56, 3.72s/it] 60%|██████ | 312/520 [19:46<12:50, 3.70s/it] {'loss': 1.1224, 'grad_norm': 0.0008143061481539082, 'learning_rate': 0.0729159531856995, 'epoch': 0.6}
+ 60%|██████ | 312/520 [19:46<12:50, 3.70s/it] 60%|██████ | 313/520 [19:50<12:43, 3.69s/it] {'loss': 1.103, 'grad_norm': 0.0007322003931490156, 'learning_rate': 0.07231644885751508, 'epoch': 0.6}
+ 60%|██████ | 313/520 [19:50<12:43, 3.69s/it] 60%|██████ | 314/520 [19:54<13:11, 3.84s/it] {'loss': 1.1399, 'grad_norm': 0.0007852100068008763, 'learning_rate': 0.07171802014981725, 'epoch': 0.6}
+ 60%|██████ | 314/520 [19:54<13:11, 3.84s/it] 61%|██████ | 315/520 [19:58<12:56, 3.79s/it] {'loss': 1.1915, 'grad_norm': 0.0008854833395599842, 'learning_rate': 0.07112069031403703, 'epoch': 0.61}
+ 61%|██████ | 315/520 [19:58<12:56, 3.79s/it] 61%|██████ | 316/520 [20:02<13:11, 3.88s/it] {'loss': 1.1267, 'grad_norm': 0.0008289650845382434, 'learning_rate': 0.07052448255890957, 'epoch': 0.61}
+ 61%|██████ | 316/520 [20:02<13:11, 3.88s/it] 61%|██████ | 317/520 [20:06<12:54, 3.81s/it] {'loss': 1.1298, 'grad_norm': 0.0007176548053010041, 'learning_rate': 0.0699294200495727, 'epoch': 0.61}
+ 61%|██████ | 317/520 [20:06<12:54, 3.81s/it] 61%|██████ | 318/520 [20:09<12:41, 3.77s/it] {'loss': 1.2428, 'grad_norm': 0.0008691876495752597, 'learning_rate': 0.06933552590666658, 'epoch': 0.61}
+ 61%|██████ | 318/520 [20:09<12:41, 3.77s/it] 61%|██████▏ | 319/520 [20:13<12:53, 3.85s/it] {'loss': 1.1262, 'grad_norm': 0.0007342053401309579, 'learning_rate': 0.06874282320543557, 'epoch': 0.61}
+ 61%|██████▏ | 319/520 [20:13<12:53, 3.85s/it] 62%|██████▏ | 320/520 [20:17<12:37, 3.79s/it] {'loss': 1.0724, 'grad_norm': 0.0007810402148464245, 'learning_rate': 0.06815133497483157, 'epoch': 0.62}
+ 62%|██████▏ | 320/520 [20:17<12:37, 3.79s/it] 62%|██████▏ | 321/520 [20:21<12:25, 3.75s/it] {'loss': 1.2649, 'grad_norm': 0.0007883545081618997, 'learning_rate': 0.06756108419661931, 'epoch': 0.62}
+ 62%|██████▏ | 321/520 [20:21<12:25, 3.75s/it] 62%|██████▏ | 322/520 [20:24<12:17, 3.72s/it] {'loss': 1.093, 'grad_norm': 0.0007629417436430116, 'learning_rate': 0.06697209380448332, 'epoch': 0.62}
+ 62%|██████▏ | 322/520 [20:24<12:17, 3.72s/it] 62%|██████▏ | 323/520 [20:28<12:09, 3.70s/it] {'loss': 1.162, 'grad_norm': 0.0008555901546454049, 'learning_rate': 0.06638438668313694, 'epoch': 0.62}
+ 62%|██████▏ | 323/520 [20:28<12:09, 3.70s/it] 62%|██████▏ | 324/520 [20:32<12:05, 3.70s/it] {'loss': 1.2069, 'grad_norm': 0.0008037801690633558, 'learning_rate': 0.06579798566743314, 'epoch': 0.62}
+ 62%|██████▏ | 324/520 [20:32<12:05, 3.70s/it] 62%|██████▎ | 325/520 [20:35<11:59, 3.69s/it] {'loss': 1.2072, 'grad_norm': 0.0008760453751506228, 'learning_rate': 0.06521291354147728, 'epoch': 0.62}
+ 62%|██████▎ | 325/520 [20:35<11:59, 3.69s/it] 63%|██████▎ | 326/520 [20:39<11:55, 3.69s/it] {'loss': 1.2026, 'grad_norm': 0.0008442227116059734, 'learning_rate': 0.06462919303774187, 'epoch': 0.63}
+ 63%|██████▎ | 326/520 [20:39<11:55, 3.69s/it] 63%|██████▎ | 327/520 [20:43<11:53, 3.70s/it] {'loss': 1.1989, 'grad_norm': 0.0008418755680267448, 'learning_rate': 0.06404684683618325, 'epoch': 0.63}
+ 63%|██████▎ | 327/520 [20:43<11:53, 3.70s/it] 63%|██████▎ | 328/520 [20:46<11:48, 3.69s/it] {'loss': 1.2446, 'grad_norm': 0.0008288799715115922, 'learning_rate': 0.0634658975633605, 'epoch': 0.63}
+ 63%|██████▎ | 328/520 [20:47<11:48, 3.69s/it] 63%|██████▎ | 329/520 [20:50<11:44, 3.69s/it] {'loss': 1.1256, 'grad_norm': 0.0007061378265198452, 'learning_rate': 0.06288636779155621, 'epoch': 0.63}
+ 63%|██████▎ | 329/520 [20:50<11:44, 3.69s/it] 63%|██████▎ | 330/520 [20:54<11:40, 3.69s/it] {'loss': 1.2021, 'grad_norm': 0.0007499404848389664, 'learning_rate': 0.06230828003789948, 'epoch': 0.63}
+ 63%|██████▎ | 330/520 [20:54<11:40, 3.69s/it] 64%|██████▎ | 331/520 [20:58<11:34, 3.68s/it] {'loss': 1.1612, 'grad_norm': 0.0008091904083369932, 'learning_rate': 0.06173165676349103, 'epoch': 0.64}
+ 64%|██████▎ | 331/520 [20:58<11:34, 3.68s/it] 64%|██████▍ | 332/520 [21:01<11:30, 3.67s/it] {'loss': 1.2243, 'grad_norm': 0.00073002700383981, 'learning_rate': 0.06115652037253053, 'epoch': 0.64}
+ 64%|██████▍ | 332/520 [21:01<11:30, 3.67s/it] 64%|██████▍ | 333/520 [21:05<11:25, 3.67s/it] {'loss': 1.2971, 'grad_norm': 0.0008656883812130255, 'learning_rate': 0.06058289321144608, 'epoch': 0.64}
+ 64%|██████▍ | 333/520 [21:05<11:25, 3.67s/it] 64%|██████▍ | 334/520 [21:09<11:22, 3.67s/it] {'loss': 1.2076, 'grad_norm': 0.0008480030588040619, 'learning_rate': 0.06001079756802592, 'epoch': 0.64}
+ 64%|██████▍ | 334/520 [21:09<11:22, 3.67s/it] 64%|██████▍ | 335/520 [21:12<11:18, 3.67s/it] {'loss': 1.2065, 'grad_norm': 0.0007656429141670887, 'learning_rate': 0.059440255670552514, 'epoch': 0.64}
+ 64%|██████▍ | 335/520 [21:12<11:18, 3.67s/it] 65%|██████▍ | 336/520 [21:16<11:13, 3.66s/it] {'loss': 1.1138, 'grad_norm': 0.0008628290354213494, 'learning_rate': 0.05887128968693887, 'epoch': 0.65}
+ 65%|██████▍ | 336/520 [21:16<11:13, 3.66s/it] 65%|██████▍ | 337/520 [21:19<11:10, 3.66s/it] {'loss': 1.104, 'grad_norm': 0.0007889145022674352, 'learning_rate': 0.058303921723867225, 'epoch': 0.65}
+ 65%|██████▍ | 337/520 [21:19<11:10, 3.66s/it] 65%|██████▌ | 338/520 [21:23<11:08, 3.67s/it] {'loss': 1.2146, 'grad_norm': 0.0007995538249410616, 'learning_rate': 0.05773817382593008, 'epoch': 0.65}
+ 65%|██████▌ | 338/520 [21:23<11:08, 3.67s/it] 65%|██████▌ | 339/520 [21:27<11:03, 3.66s/it] {'loss': 1.1588, 'grad_norm': 0.0008146249015366844, 'learning_rate': 0.057174067974773715, 'epoch': 0.65}
+ 65%|██████▌ | 339/520 [21:27<11:03, 3.66s/it] 65%|██████▌ | 340/520 [21:30<11:00, 3.67s/it] {'loss': 1.1464, 'grad_norm': 0.0007649152178419126, 'learning_rate': 0.056611626088244195, 'epoch': 0.65}
+ 65%|██████▌ | 340/520 [21:31<11:00, 3.67s/it] 66%|██████▌ | 341/520 [21:34<10:56, 3.67s/it] {'loss': 1.1732, 'grad_norm': 0.000837599575025728, 'learning_rate': 0.056050870019535494, 'epoch': 0.66}
+ 66%|██████▌ | 341/520 [21:34<10:56, 3.67s/it] 66%|██████▌ | 342/520 [21:38<10:51, 3.66s/it] {'loss': 1.1996, 'grad_norm': 0.0009744847527519522, 'learning_rate': 0.05549182155634076, 'epoch': 0.66}
+ 66%|██████▌ | 342/520 [21:38<10:51, 3.66s/it] 66%|██████▌ | 343/520 [21:42<10:49, 3.67s/it] {'loss': 1.1518, 'grad_norm': 0.0006671535469045233, 'learning_rate': 0.054934502420005464, 'epoch': 0.66}
+ 66%|██████▌ | 343/520 [21:42<10:49, 3.67s/it] 66%|██████▌ | 344/520 [21:45<10:47, 3.68s/it] {'loss': 1.1321, 'grad_norm': 0.0007291539856174542, 'learning_rate': 0.0543789342646837, 'epoch': 0.66}
{'loss': 1.1321, 'grad_norm': 0.0007291539856174542, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:45<10:47, 3.68s/it] 66%|██████▋ | 345/520 [21:49<10:44, 3.68s/it] {'loss': 1.233, 'grad_norm': 0.0008110289380607393, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:49<10:44, 3.68s/it] 67%|██████▋ | 346/520 [21:53<10:41, 3.69s/it] {'loss': 1.1682, 'grad_norm': 0.0007852848726013647, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:53<10:41, 3.69s/it] 67%|██████▋ | 347/520 [21:56<10:41, 3.71s/it] {'loss': 1.1477, 'grad_norm': 0.0007254890326632399, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:56<10:41, 3.71s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:00<10:35, 3.70s/it] {'loss': 1.1095, 'grad_norm': 0.0009299025883812754, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:00<10:35, 3.70s/it] 67%|██████▋ | 349/520 [22:04<10:32, 3.70s/it] {'loss': 1.1447, 'grad_norm': 0.000805604997208881, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:04<10:32, 3.70s/it] 67%|██████▋ | 350/520 [22:07<10:27, 3.69s/it] {'loss': 1.1851, 'grad_norm': 0.0008031529725264729, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:07<10:27, 3.69s/it] 68%|██████▊ | 351/520 [22:11<10:24, 3.70s/it] {'loss': 1.0967, 'grad_norm': 0.0007478124711324068, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:11<10:24, 3.70s/it] 68%|██████▊ | 352/520 [22:15<10:21, 3.70s/it] {'loss': 1.2123, 'grad_norm': 0.000750261820135803, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:15<10:21, 3.70s/it] 68%|██████▊ | 353/520 [22:19<10:18, 3.70s/it] {'loss': 1.1378, 'grad_norm': 0.0006679837211319085, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:19<10:18, 3.70s/it] 68%|██████▊ | 354/520 [22:22<10:13, 3.69s/it] {'loss': 1.2355, 'grad_norm': 0.0007369618825017302, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:22<10:13, 3.69s/it] 68%|██████▊ | 355/520 [22:26<10:06, 3.68s/it] {'loss': 1.1571, 'grad_norm': 0.0007733866176107121, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:26<10:06, 3.68s/it] 68%|██████▊ | 356/520 [22:29<10:01, 3.67s/it] {'loss': 1.1589, 'grad_norm': 0.0008027331898494304, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:29<10:01, 3.67s/it] 69%|██████▊ | 357/520 [22:33<09:56, 3.66s/it] {'loss': 1.1928, 'grad_norm': 0.0007624510174659525, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:33<09:56, 3.66s/it] 69%|██████▉ | 358/520 [22:37<09:52, 3.66s/it] {'loss': 1.1221, 'grad_norm': 0.0007994192550403061, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:37<09:52, 3.66s/it] 69%|██████▉ | 359/520 [22:40<09:50, 3.66s/it] {'loss': 1.1783, 'grad_norm': 0.0007943492270368738, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:40<09:50, 3.66s/it] 69%|██████▉ | 360/520 [22:44<09:48, 3.68s/it] {'loss': 1.1902, 'grad_norm': 0.000788915821112091, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:44<09:48, 3.68s/it] 69%|██████▉ | 
361/520 [22:48<09:43, 3.67s/it] {'loss': 1.2021, 'grad_norm': 0.0007139455081514212, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:48<09:43, 3.67s/it] 70%|██████▉ | 362/520 [22:51<09:37, 3.66s/it] {'loss': 1.1741, 'grad_norm': 0.0008468277883502102, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:51<09:37, 3.66s/it] 70%|██████▉ | 363/520 [22:55<09:35, 3.67s/it] {'loss': 1.2018, 'grad_norm': 0.0008000769860397199, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:55<09:35, 3.67s/it] 70%|███████ | 364/520 [22:59<09:31, 3.66s/it] {'loss': 1.218, 'grad_norm': 0.0007863096551916831, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:59<09:31, 3.66s/it] 70%|███████ | 365/520 [23:03<09:31, 3.69s/it] {'loss': 1.2501, 'grad_norm': 0.0008022515920050826, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:03<09:31, 3.69s/it] 70%|███████ | 366/520 [23:06<09:24, 3.67s/it] {'loss': 1.2144, 'grad_norm': 0.0007773726483593139, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:06<09:24, 3.67s/it] 71%|███████ | 367/520 [23:10<09:21, 3.67s/it] {'loss': 1.2129, 'grad_norm': 0.0008177758059856736, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:10<09:21, 3.67s/it] 71%|███████ | 368/520 [23:13<09:16, 3.66s/it] {'loss': 1.0691, 'grad_norm': 0.0007938811433899851, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:13<09:16, 3.66s/it] 71%|███████ | 369/520 [23:17<09:12, 3.66s/it] {'loss': 1.1758, 'grad_norm': 0.0007046733001495229, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:17<09:12, 3.66s/it] 71%|███████ | 370/520 [23:21<09:08, 3.65s/it] {'loss': 1.1308, 'grad_norm': 0.0007485949075257764, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:21<09:08, 3.65s/it] 71%|███████▏ | 371/520 [23:24<09:04, 3.66s/it] {'loss': 1.1219, 'grad_norm': 0.0008453891883973071, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:24<09:04, 3.66s/it] 72%|███████▏ | 372/520 [23:28<09:01, 3.66s/it] {'loss': 1.2478, 'grad_norm': 0.0007125743646535289, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:28<09:01, 3.66s/it] 72%|███████▏ | 373/520 [23:32<08:57, 3.65s/it] {'loss': 1.1342, 'grad_norm': 0.0008436856502624353, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:32<08:57, 3.65s/it] 72%|███████▏ | 374/520 [23:35<08:53, 3.65s/it] {'loss': 1.2158, 'grad_norm': 0.0008227602297251978, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:35<08:53, 3.65s/it] 72%|███████▏ | 375/520 [23:39<08:49, 3.65s/it] {'loss': 1.1319, 'grad_norm': 0.0007758480872363328, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:39<08:49, 3.65s/it] 72%|███████▏ | 376/520 [23:43<08:46, 3.66s/it] {'loss': 1.2356, 'grad_norm': 0.0007719355264827961, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:43<08:46, 3.66s/it] 72%|███████▎ | 377/520 [23:46<08:42, 3.65s/it] {'loss': 1.169, 'grad_norm': 0.00089427657653854, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:46<08:42, 3.65s/it] 73%|███████▎ | 378/520 [23:50<08:38, 3.65s/it] {'loss': 1.2331, 'grad_norm': 0.0007587193772775786, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 
73%|███████▎ | 378/520 [23:50<08:38, 3.65s/it] 73%|███████▎ | 379/520 [23:54<08:34, 3.65s/it] {'loss': 1.2018, 'grad_norm': 0.0007521046994952903, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:54<08:34, 3.65s/it] 73%|███████▎ | 380/520 [23:57<08:29, 3.64s/it] {'loss': 1.2209, 'grad_norm': 0.0008018562275510834, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:57<08:29, 3.64s/it] 73%|███████▎ | 381/520 [24:01<08:26, 3.65s/it] {'loss': 1.2084, 'grad_norm': 0.000745393908795325, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:01<08:26, 3.65s/it] 73%|███████▎ | 382/520 [24:05<08:26, 3.67s/it] {'loss': 1.1923, 'grad_norm': 0.0007330398273839523, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:05<08:26, 3.67s/it] 74%|███████▎ | 383/520 [24:08<08:22, 3.67s/it] {'loss': 1.0527, 'grad_norm': 0.0008530069472915166, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:08<08:22, 3.67s/it] 74%|███████▍ | 384/520 [24:12<08:16, 3.65s/it] {'loss': 1.225, 'grad_norm': 0.0006927805320787076, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:12<08:16, 3.65s/it] 74%|███████▍ | 385/520 [24:16<08:12, 3.65s/it] {'loss': 1.1904, 'grad_norm': 0.0007297813460671193, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:16<08:12, 3.65s/it] 74%|███████▍ | 386/520 [24:19<08:08, 3.65s/it] {'loss': 1.1459, 'grad_norm': 0.0006825804963353166, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:19<08:08, 3.65s/it] 74%|███████▍ | 387/520 [24:23<08:05, 3.65s/it] {'loss': 1.245, 'grad_norm': 0.0007613092274741649, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:23<08:05, 3.65s/it] 75%|███████▍ | 388/520 [24:27<08:01, 3.65s/it] {'loss': 1.1041, 'grad_norm': 0.0007573889295333075, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:27<08:01, 3.65s/it] 75%|███████▍ | 389/520 [24:30<07:57, 3.64s/it] {'loss': 1.1498, 'grad_norm': 0.0008842166353401236, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:30<07:57, 3.64s/it] 75%|███████▌ | 390/520 [24:34<07:56, 3.66s/it] {'loss': 1.2182, 'grad_norm': 0.0007728987910241661, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:34<07:56, 3.66s/it] 75%|███████▌ | 391/520 [24:37<07:51, 3.66s/it] {'loss': 1.2782, 'grad_norm': 0.0008018312962433765, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:37<07:51, 3.66s/it] 75%|███████▌ | 392/520 [24:41<07:48, 3.66s/it] {'loss': 1.1048, 'grad_norm': 0.0007726043463942269, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:41<07:48, 3.66s/it] 76%|███████▌ | 393/520 [24:45<07:44, 3.65s/it] {'loss': 1.1021, 'grad_norm': 0.0006812374002234191, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:45<07:44, 3.65s/it] 76%|███████▌ | 394/520 [24:48<07:39, 3.65s/it] {'loss': 1.174, 'grad_norm': 0.0008720447569060723, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:48<07:39, 3.65s/it] 76%|███████▌ | 395/520 [24:52<07:35, 3.65s/it] {'loss': 1.1386, 'grad_norm': 0.0008367551125315463, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:52<07:35, 3.65s/it] 76%|███████▌ | 396/520 [24:56<07:31, 3.64s/it] {'loss': 1.2188, 
'grad_norm': 0.0008260032150672541, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:56<07:31, 3.64s/it] 76%|███████▋ | 397/520 [24:59<07:30, 3.67s/it] {'loss': 1.1912, 'grad_norm': 0.000769286703161534, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:59<07:30, 3.67s/it] 77%|███████▋ | 398/520 [25:03<07:34, 3.72s/it] {'loss': 1.1878, 'grad_norm': 0.0008263141257360141, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:03<07:34, 3.72s/it] 77%|███████▋ | 399/520 [25:07<07:30, 3.72s/it] {'loss': 1.1373, 'grad_norm': 0.0007450462407338312, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:07<07:30, 3.72s/it] 77%|███████▋ | 400/520 [25:11<07:24, 3.71s/it] {'loss': 1.1672, 'grad_norm': 0.0007060709217451467, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:11<07:24, 3.71s/it] 77%|███████▋ | 401/520 [25:14<07:19, 3.70s/it] {'loss': 1.0295, 'grad_norm': 0.0008622938874197774, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:14<07:19, 3.70s/it] 77%|███████▋ | 402/520 [25:18<07:14, 3.68s/it] {'loss': 1.1552, 'grad_norm': 0.0008017509541769693, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:18<07:14, 3.68s/it] 78%|███████▊ | 403/520 [25:22<07:08, 3.67s/it] {'loss': 1.1765, 'grad_norm': 0.0008594208134390979, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:22<07:08, 3.67s/it] 78%|███████▊ | 404/520 [25:25<07:05, 3.67s/it] {'loss': 1.0893, 'grad_norm': 0.0009084902446210824, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:25<07:05, 3.67s/it] 78%|███████▊ | 405/520 [25:29<07:01, 3.67s/it] {'loss': 1.1502, 'grad_norm': 0.0008263245970590217, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:29<07:01, 3.67s/it] 78%|███████▊ | 406/520 [25:33<06:57, 3.66s/it] {'loss': 1.0721, 'grad_norm': 0.0009253537651454668, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:33<06:57, 3.66s/it] 78%|███████▊ | 407/520 [25:36<06:53, 3.66s/it] {'loss': 1.2612, 'grad_norm': 0.000814429395102695, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:36<06:53, 3.66s/it] 78%|███████▊ | 408/520 [25:40<06:48, 3.65s/it] {'loss': 1.1726, 'grad_norm': 0.0008599184551170142, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:40<06:48, 3.65s/it] 79%|███████▊ | 409/520 [25:44<06:45, 3.65s/it] {'loss': 1.2888, 'grad_norm': 0.0008498676684426844, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:44<06:45, 3.65s/it] 79%|███████▉ | 410/520 [25:47<06:41, 3.65s/it] {'loss': 1.0303, 'grad_norm': 0.0007967160114646696, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:47<06:41, 3.65s/it] 79%|███████▉ | 411/520 [25:51<06:37, 3.65s/it] {'loss': 1.2694, 'grad_norm': 0.0008587593256371364, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:51<06:37, 3.65s/it] 79%|███████▉ | 412/520 [25:54<06:34, 3.65s/it] {'loss': 1.1725, 'grad_norm': 0.0007983785264023255, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:55<06:34, 3.65s/it] 79%|███████▉ | 413/520 [25:58<06:31, 3.65s/it] {'loss': 1.1679, 'grad_norm': 0.000903589642286116, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 
[25:58<06:31, 3.65s/it] 80%|███████▉ | 414/520 [26:02<06:31, 3.70s/it] {'loss': 0.9741, 'grad_norm': 0.0006484677271141809, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:02<06:31, 3.70s/it] 80%|███████▉ | 415/520 [26:06<06:27, 3.69s/it] {'loss': 1.1575, 'grad_norm': 0.0007426671108619795, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:06<06:27, 3.69s/it] 80%|████████ | 416/520 [26:09<06:23, 3.69s/it] {'loss': 1.0641, 'grad_norm': 0.000836564090017658, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:09<06:23, 3.69s/it] 80%|████████ | 417/520 [26:13<06:20, 3.69s/it] {'loss': 1.2274, 'grad_norm': 0.0007884902299831069, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:13<06:20, 3.69s/it] 80%|████████ | 418/520 [26:17<06:15, 3.68s/it] {'loss': 1.2216, 'grad_norm': 0.0007440299059111893, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:17<06:15, 3.68s/it] 81%|████████ | 419/520 [26:20<06:11, 3.67s/it] {'loss': 1.2156, 'grad_norm': 0.0008742498377890437, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:20<06:11, 3.67s/it] 81%|████████ | 420/520 [26:24<06:12, 3.72s/it] {'loss': 1.1092, 'grad_norm': 0.0008436106396378452, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:24<06:12, 3.72s/it] 81%|████████ | 421/520 [26:28<06:07, 3.71s/it] {'loss': 1.0427, 'grad_norm': 0.0008157495448095776, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:28<06:07, 3.71s/it] 81%|████████ | 422/520 [26:32<06:02, 3.70s/it] {'loss': 1.1651, 'grad_norm': 0.0008387498734655959, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:32<06:02, 3.70s/it] 81%|████████▏ | 423/520 [26:35<05:58, 3.69s/it] {'loss': 1.1319, 'grad_norm': 0.0008796026192567407, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:35<05:58, 3.69s/it] 82%|████████▏ | 424/520 [26:39<05:54, 3.69s/it] {'loss': 1.2526, 'grad_norm': 0.0007530332351964562, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:39<05:54, 3.69s/it] 82%|████████▏ | 425/520 [26:43<05:50, 3.69s/it] {'loss': 1.1509, 'grad_norm': 0.0008042217436391259, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:43<05:50, 3.69s/it] 82%|████████▏ | 426/520 [26:46<05:49, 3.71s/it] {'loss': 1.1826, 'grad_norm': 0.0010028238666954451, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:46<05:49, 3.71s/it] 82%|████████▏ | 427/520 [26:50<05:48, 3.75s/it] {'loss': 1.088, 'grad_norm': 0.000771003649541181, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:50<05:48, 3.75s/it] 82%|████████▏ | 428/520 [26:54<05:46, 3.76s/it] {'loss': 1.0751, 'grad_norm': 0.0008658988424021996, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:54<05:46, 3.76s/it] 82%|████████▎ | 429/520 [26:58<05:43, 3.77s/it] {'loss': 1.1709, 'grad_norm': 0.0007974654511803661, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:58<05:43, 3.77s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:02<05:40, 3.78s/it] {'loss': 1.1698, 'grad_norm': 0.0007499464824769609, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:02<05:40, 3.78s/it] 83%|████████▎ | 431/520 [27:05<05:37, 3.79s/it] {'loss': 1.1365, 'grad_norm': 0.0007670354356441421, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:05<05:37, 3.79s/it] 83%|████████▎ | 432/520 [27:09<05:34, 3.80s/it] {'loss': 1.081, 'grad_norm': 0.0008139503734015596, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:09<05:34, 3.80s/it] 83%|████████▎ | 433/520 [27:13<05:29, 3.79s/it] {'loss': 1.2151, 'grad_norm': 0.000785030690619525, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:13<05:29, 3.79s/it] 83%|████████▎ | 434/520 [27:17<05:26, 3.80s/it] {'loss': 0.9632, 'grad_norm': 0.0008127311117189394, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:17<05:26, 3.80s/it] 84%|████████▎ | 435/520 [27:21<05:22, 3.80s/it] {'loss': 1.2443, 'grad_norm': 0.0008639692214238616, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:21<05:22, 3.80s/it] 84%|████████▍ | 436/520 [27:24<05:19, 3.80s/it] {'loss': 1.0518, 'grad_norm': 0.0008141181177171868, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:24<05:19, 3.80s/it] 84%|████████▍ | 437/520 [27:28<05:14, 3.79s/it] {'loss': 1.2638, 'grad_norm': 0.0008177750344378373, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:28<05:14, 3.79s/it] 84%|████████▍ | 438/520 [27:32<05:10, 3.79s/it] {'loss': 1.0859, 'grad_norm': 0.0007996765883462826, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:32<05:10, 3.79s/it] 84%|████████▍ | 439/520 [27:36<05:07, 3.79s/it] {'loss': 1.1215, 'grad_norm': 0.0006605624922082523, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:36<05:07, 3.79s/it] 85%|████████▍ | 440/520 [27:40<05:02, 3.79s/it] {'loss': 1.1236, 'grad_norm': 0.0008395566367780025, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:40<05:02, 3.79s/it] 85%|████████▍ | 441/520 [27:43<04:59, 3.79s/it] {'loss': 1.1371, 'grad_norm': 0.0007694596931425255, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:43<04:59, 3.79s/it] 85%|████████▌ | 442/520 [27:47<04:55, 3.78s/it] {'loss': 1.187, 'grad_norm': 0.0008712334662674244, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:47<04:55, 3.78s/it] 85%|████████▌ | 443/520 [27:51<04:51, 3.79s/it] {'loss': 1.1961, 'grad_norm': 0.0007786300317337883, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:51<04:51, 3.79s/it] 85%|████████▌ | 444/520 [27:55<04:45, 3.75s/it] {'loss': 1.161, 'grad_norm': 0.0007078210923909709, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:55<04:45, 3.75s/it] 86%|████████▌ | 445/520 [27:58<04:38, 3.71s/it] {'loss': 1.0912, 'grad_norm': 0.0007624803091591618, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:58<04:38, 3.71s/it] 86%|████████▌ | 446/520 [28:02<04:32, 3.69s/it] {'loss': 1.2118, 'grad_norm': 0.0007089385777013728, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:02<04:32, 3.69s/it] 
86%|████████▌ | 447/520 [28:05<04:28, 3.68s/it] {'loss': 1.1622, 'grad_norm': 0.0007788304916606836, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:05<04:28, 3.68s/it] 86%|████████▌ | 448/520 [28:09<04:23, 3.67s/it] {'loss': 1.1609, 'grad_norm': 0.0008385026957477143, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:09<04:23, 3.67s/it] 86%|████████▋ | 449/520 [28:13<04:22, 3.69s/it] {'loss': 1.1714, 'grad_norm': 0.0007787548830017892, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:13<04:22, 3.69s/it] 87%|████████▋ | 450/520 [28:17<04:19, 3.71s/it] {'loss': 1.187, 'grad_norm': 0.0008264177896286971, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:17<04:19, 3.71s/it] 87%|████████▋ | 451/520 [28:20<04:17, 3.73s/it] {'loss': 1.1881, 'grad_norm': 0.000824046581061994, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:20<04:17, 3.73s/it] 87%|████████▋ | 452/520 [28:24<04:14, 3.75s/it] {'loss': 1.218, 'grad_norm': 0.0007374536963396919, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:24<04:14, 3.75s/it] 87%|████████▋ | 453/520 [28:28<04:11, 3.75s/it] {'loss': 1.1922, 'grad_norm': 0.0007342803357351287, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:28<04:11, 3.75s/it] 87%|████████▋ | 454/520 [28:32<04:06, 3.73s/it] {'loss': 1.0957, 'grad_norm': 0.0007862002831570123, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:32<04:06, 3.73s/it] 88%|████████▊ | 455/520 [28:35<04:00, 3.71s/it] {'loss': 1.2374, 'grad_norm': 0.0007954023660542643, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:35<04:00, 3.71s/it] 88%|████████▊ | 456/520 [28:39<03:56, 3.69s/it] {'loss': 1.1715, 'grad_norm': 0.0008031540290063092, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:39<03:56, 3.69s/it] 88%|████████▊ | 457/520 [28:43<03:51, 3.68s/it] {'loss': 1.0873, 'grad_norm': 0.000688758959014055, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:43<03:51, 3.68s/it] 88%|████████▊ | 458/520 [28:46<03:48, 3.69s/it] {'loss': 1.2885, 'grad_norm': 0.0008729940230982836, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:46<03:48, 3.69s/it] 88%|████████▊ | 459/520 [28:50<03:45, 3.69s/it] {'loss': 1.2205, 'grad_norm': 0.0007784927528498071, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:50<03:45, 3.69s/it] 88%|████████▊ | 460/520 [28:54<03:42, 3.70s/it] {'loss': 1.1113, 'grad_norm': 0.0007730646260223797, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:54<03:42, 3.70s/it] 89%|████████▊ | 461/520 [28:57<03:38, 3.71s/it] {'loss': 1.1723, 'grad_norm': 0.000623446538002517, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:57<03:38, 3.71s/it] 89%|████████▉ | 462/520 [29:01<03:34, 3.70s/it] {'loss': 1.2592, 'grad_norm': 0.0007735895625138552, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:01<03:34, 3.70s/it] 89%|████████▉ | 463/520 [29:05<03:30, 3.69s/it] {'loss': 1.0811, 'grad_norm': 0.0008333112874018136, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:05<03:30, 3.69s/it] 89%|████████▉ | 464/520 [29:09<03:29, 3.73s/it] {'loss': 1.202, 
'grad_norm': 0.0008125594854538857, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:09<03:29, 3.73s/it] 89%|████████▉ | 465/520 [29:12<03:26, 3.76s/it] {'loss': 1.3065, 'grad_norm': 0.0008267066740886236, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:12<03:26, 3.76s/it] 90%|████████▉ | 466/520 [29:16<03:26, 3.83s/it] {'loss': 1.1984, 'grad_norm': 0.000724207276371412, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:16<03:26, 3.83s/it] 90%|████████▉ | 467/520 [29:20<03:24, 3.85s/it] {'loss': 1.1502, 'grad_norm': 0.0007345388743187976, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:20<03:24, 3.85s/it] 90%|█████████ | 468/520 [29:24<03:21, 3.87s/it] {'loss': 1.1686, 'grad_norm': 0.0009049737233021379, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:24<03:21, 3.87s/it] 90%|█████████ | 469/520 [29:28<03:17, 3.88s/it] {'loss': 1.2342, 'grad_norm': 0.0008643863012962952, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:28<03:17, 3.88s/it] 90%|█████████ | 470/520 [29:32<03:14, 3.88s/it] {'loss': 1.1073, 'grad_norm': 0.000734773091529036, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:32<03:14, 3.88s/it] 91%|█████████ | 471/520 [29:36<03:10, 3.90s/it] {'loss': 1.1356, 'grad_norm': 0.0008544276357692813, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:36<03:10, 3.90s/it] 91%|█████████ | 472/520 [29:40<03:07, 3.90s/it] {'loss': 1.1065, 'grad_norm': 0.0007509238896551803, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:40<03:07, 3.90s/it] 91%|█████████ | 473/520 [29:44<03:03, 3.90s/it] {'loss': 1.1759, 'grad_norm': 0.0008159354004377244, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:44<03:03, 3.90s/it] 91%|█████████ | 474/520 [29:48<02:59, 3.90s/it] {'loss': 1.186, 'grad_norm': 0.0007430650573215541, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:48<02:59, 3.90s/it] 91%|█████████▏| 475/520 [29:52<02:55, 3.90s/it] {'loss': 1.1009, 'grad_norm': 0.0007264682538563958, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:52<02:55, 3.90s/it] 92%|█████████▏| 476/520 [29:55<02:51, 3.90s/it] {'loss': 1.159, 'grad_norm': 0.000830730052840252, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:55<02:51, 3.90s/it] 92%|█████████▏| 477/520 [29:59<02:47, 3.89s/it] {'loss': 1.1564, 'grad_norm': 0.0008880396369470325, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:59<02:47, 3.89s/it] 92%|█████████▏| 478/520 [30:03<02:43, 3.89s/it] {'loss': 1.0966, 'grad_norm': 0.0007812093511455621, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:03<02:43, 3.89s/it] 92%|█████████▏| 479/520 [30:07<02:39, 3.88s/it] {'loss': 1.1523, 'grad_norm': 0.0008247761270041283, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:07<02:39, 3.88s/it] 92%|█████████▏| 480/520 [30:11<02:35, 3.88s/it] {'loss': 1.1711, 'grad_norm': 0.0007393762946257934, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:11<02:35, 3.88s/it] 92%|█████████▎| 481/520 [30:15<02:31, 3.88s/it] {'loss': 1.1587, 'grad_norm': 0.0007097614136677842, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:15<02:31, 3.88s/it] 93%|█████████▎| 482/520 [30:19<02:27, 3.88s/it] {'loss': 1.1819, 'grad_norm': 0.0007300619068386235, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:19<02:27, 3.88s/it] 93%|█████████▎| 483/520 [30:23<02:23, 3.88s/it] {'loss': 1.1691, 'grad_norm': 0.0008078913448683025, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:23<02:23, 3.88s/it] 93%|█████████▎| 484/520 [30:26<02:19, 3.88s/it] {'loss': 1.1757, 'grad_norm': 0.0008209390959489744, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:26<02:19, 3.88s/it] 93%|█████████▎| 485/520 [30:30<02:15, 3.88s/it] {'loss': 1.129, 'grad_norm': 0.0007715994701206676, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:30<02:15, 3.88s/it] 93%|█████████▎| 486/520 [30:34<02:11, 3.88s/it] {'loss': 1.2486, 'grad_norm': 0.000820052812897577, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:34<02:11, 3.88s/it] 94%|█████████▎| 487/520 [30:38<02:08, 3.88s/it] {'loss': 1.109, 'grad_norm': 0.0007641335259744192, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:38<02:08, 3.88s/it] 94%|█████████▍| 488/520 [30:42<02:04, 3.88s/it] {'loss': 1.0516, 'grad_norm': 0.0008127229935573189, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:42<02:04, 3.88s/it] 94%|█████████▍| 489/520 [30:46<02:00, 3.88s/it] {'loss': 1.1853, 'grad_norm': 0.0006843691118600901, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:46<02:00, 3.88s/it] 94%|█████████▍| 490/520 [30:50<01:56, 3.88s/it] {'loss': 1.1689, 'grad_norm': 0.0008036451542664516, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:50<01:56, 3.88s/it] 94%|█████████▍| 491/520 [30:54<01:52, 3.88s/it] {'loss': 1.1363, 'grad_norm': 0.0008181717034730436, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:54<01:52, 3.88s/it] 95%|█████████▍| 492/520 [30:57<01:48, 3.87s/it] {'loss': 1.2494, 'grad_norm': 0.000812684774303894, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:57<01:48, 3.87s/it] 95%|█████████▍| 493/520 [31:01<01:44, 3.88s/it] {'loss': 1.177, 'grad_norm': 0.0007859209805943788, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:01<01:44, 3.88s/it] 95%|█████████▌| 494/520 [31:05<01:40, 3.85s/it] {'loss': 1.1792, 'grad_norm': 0.0007320664955156908, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:05<01:40, 3.85s/it] 95%|█████████▌| 495/520 [31:09<01:34, 3.79s/it] {'loss': 1.1594, 'grad_norm': 0.0008094594497729427, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:09<01:34, 3.79s/it] 95%|█████████▌| 496/520 [31:13<01:30, 3.76s/it] {'loss': 1.0816, 'grad_norm': 0.0008144602681143965, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:13<01:30, 3.76s/it] 96%|█████████▌| 497/520 [31:16<01:25, 3.73s/it] {'loss': 1.1114, 'grad_norm': 0.0006839642275808237, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:16<01:25, 3.73s/it] 96%|█████████▌| 498/520 [31:20<01:21, 3.70s/it] {'loss': 1.1475, 'grad_norm': 0.0007902288184551261, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:20<01:21, 
3.70s/it] 96%|█████████▌| 499/520 [31:23<01:17, 3.70s/it] {'loss': 1.249, 'grad_norm': 0.0007406715218420413, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:23<01:17, 3.70s/it] 96%|█████████▌| 500/520 [31:27<01:13, 3.68s/it] {'loss': 1.2712, 'grad_norm': 0.0009332831184290465, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:27<01:13, 3.68s/it] 96%|█████████▋| 501/520 [31:31<01:09, 3.66s/it] {'loss': 1.1525, 'grad_norm': 0.00083742053240302, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:31<01:09, 3.66s/it] 97%|█████████▋| 502/520 [31:34<01:06, 3.67s/it] {'loss': 1.1849, 'grad_norm': 0.00076564232409825, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:34<01:06, 3.67s/it] 97%|█████████▋| 503/520 [31:38<01:02, 3.67s/it] {'loss': 1.1417, 'grad_norm': 0.0007839768573304983, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:38<01:02, 3.67s/it] 97%|█████████▋| 504/520 [31:42<00:58, 3.67s/it] {'loss': 1.1803, 'grad_norm': 0.000921428139271114, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:42<00:58, 3.67s/it] 97%|█████████▋| 505/520 [31:45<00:55, 3.67s/it] {'loss': 1.2091, 'grad_norm': 0.0008020484834940221, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:45<00:55, 3.67s/it] 97%|█████████▋| 506/520 [31:49<00:51, 3.67s/it] {'loss': 1.1403, 'grad_norm': 0.000832546230133315, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:49<00:51, 3.67s/it] 98%|█████████▊| 507/520 [31:53<00:47, 3.67s/it] {'loss': 1.2873, 'grad_norm': 0.0007291785207019547, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:53<00:47, 3.67s/it] 98%|█████████▊| 508/520 [31:56<00:44, 3.68s/it] {'loss': 1.2561, 'grad_norm': 0.000817865264517659, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:56<00:44, 3.68s/it] 98%|█████████▊| 509/520 [32:00<00:40, 3.67s/it] {'loss': 1.2303, 'grad_norm': 0.0007685738409703832, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:00<00:40, 3.67s/it] 98%|█████████▊| 510/520 [32:04<00:36, 3.67s/it] {'loss': 1.1757, 'grad_norm': 0.0008033523098201505, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:04<00:36, 3.67s/it] 98%|█████████▊| 511/520 [32:07<00:32, 3.66s/it] {'loss': 1.1417, 'grad_norm': 0.0007804026676623108, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:07<00:32, 3.66s/it] 98%|█████████▊| 512/520 [32:11<00:29, 3.66s/it] {'loss': 1.034, 'grad_norm': 0.0007855913741000638, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:11<00:29, 3.66s/it] 99%|█████████▊| 513/520 [32:15<00:25, 3.66s/it] {'loss': 1.2295, 'grad_norm': 0.0009296216165395716, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:15<00:25, 3.66s/it] 99%|█████████▉| 514/520 [32:19<00:22, 3.69s/it] {'loss': 1.1979, 'grad_norm': 0.0007322185936897681, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:19<00:22, 3.69s/it] 99%|█████████▉| 515/520 [32:22<00:18, 3.74s/it] {'loss': 1.2519, 'grad_norm': 0.000945446936589641, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:22<00:18, 3.74s/it] 99%|█████████▉| 516/520 [32:26<00:15, 3.78s/it] 
{'loss': 1.1643, 'grad_norm': 0.0007954837337344682, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:26<00:15, 3.78s/it] 99%|█████████▉| 517/520 [32:30<00:11, 3.79s/it] {'loss': 1.183, 'grad_norm': 0.0007515850515877379, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:30<00:11, 3.79s/it] 100%|█████████▉| 518/520 [32:34<00:07, 3.79s/it] {'loss': 1.168, 'grad_norm': 0.0008406333200564484, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:34<00:07, 3.79s/it] 100%|█████████▉| 519/520 [32:38<00:03, 3.80s/it] {'loss': 1.1521, 'grad_norm': 0.0007657136270965582, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:38<00:03, 3.80s/it] 100%|██████████| 520/520 [32:42<00:00, 4.03s/it] {'loss': 1.1523, 'grad_norm': 0.000700949692773842, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:42<00:00, 4.03s/it] {'train_runtime': 1962.7805, 'train_samples_per_second': 33.895, 'train_steps_per_second': 0.265, 'train_loss': 1.230701500062759, 'epoch': 1.0} + 100%|██████████| 520/520 [32:42<00:00, 4.03s/it] 100%|██████████| 520/520 [32:42<00:00, 3.77s/it] +[2025-10-13 17:30:20,276] [INFO] [launch.py:348:main] Process 908630 exits successfully. +[2025-10-13 17:30:20,276] [INFO] [launch.py:348:main] Process 908631 exits successfully. +[2025-10-13 17:30:20,276] [INFO] [launch.py:348:main] Process 908628 exits successfully. +[2025-10-13 17:30:20,277] [INFO] [launch.py:348:main] Process 908625 exits successfully. +[2025-10-13 17:30:21,278] [INFO] [launch.py:348:main] Process 908626 exits successfully. +[2025-10-13 17:30:21,279] [INFO] [launch.py:348:main] Process 908627 exits successfully. +[2025-10-13 17:30:21,279] [INFO] [launch.py:348:main] Process 908629 exits successfully. +[2025-10-13 17:30:25,284] [INFO] [launch.py:348:main] Process 908624 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_0.9_2e-1_connector-5.0_0.9_2e-1_ablation_20251013_165603.log +Timestamp: 2025-10-13 17:30:27 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation_20251013_173027.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation_20251013_173027.log new file mode 100644 index 0000000000000000000000000000000000000000..ad48d3720426270e2e1aa85d9a13c3b474e24910 --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation_20251013_173027.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation_20251013_173027.log +Timestamp: 2025-10-13 17:30:27 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
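The learning_rate trace logged above follows a cosine schedule with linear warmup (--learning_rate 2e-1, --warmup_ratio 0.03, 520 optimizer steps), and the closing summary implies an effective batch of 8 GPUs x 4 per-device x 4 gradient-accumulation = 128 samples per optimizer step. A minimal Python sketch, assuming the run uses the standard transformers-style cosine-with-warmup formula (lr_at is a hypothetical helper written for this check, not part of the training code); it reproduces the logged values, including the exact 0.05 at step 352 and the decay to 0.0 at step 520:

    import math

    PEAK_LR = 2e-1                                 # --learning_rate
    TOTAL_STEPS = 520                              # optimizer steps in this run
    WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)   # --warmup_ratio 0.03 -> 16

    def lr_at(step: int) -> float:
        # Hypothetical helper mirroring a cosine schedule with linear warmup
        # (the behavior of transformers' get_cosine_schedule_with_warmup).
        if step < WARMUP_STEPS:
            return PEAK_LR * step / max(1, WARMUP_STEPS)
        progress = (step - WARMUP_STEPS) / max(1, TOTAL_STEPS - WARMUP_STEPS)
        return PEAK_LR * max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))

    # Spot-checks against values logged above.
    assert math.isclose(lr_at(352), 0.050000000000000024, rel_tol=1e-9)
    assert math.isclose(lr_at(519), 1.9427068461808086e-06, rel_tol=1e-6)
    assert abs(lr_at(520)) < 1e-12                 # schedule reaches 0.0 at the last step

    # Throughput summary check (approximate; the last batch may be partial):
    # 128 samples/step over 520 steps in 1962.7805 s.
    runtime = 1962.7805
    assert abs(TOTAL_STEPS / runtime - 0.265) < 1e-3        # train_steps_per_second
    assert abs(TOTAL_STEPS * 128 / runtime - 33.895) < 0.1  # train_samples_per_second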
+ import pynvml # type: ignore[import] +[2025-10-13 17:30:30,467] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:33,201] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 17:30:33,203] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 1.1 --temperature_mlp_text 1.1 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 1.1 --temperature_mlp_vision 1.1 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 1.1 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-13 17:30:35,797] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:36,853] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 17:30:36,853] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 17:30:36,853] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 17:30:36,853] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 17:30:36,853] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 17:30:36,853] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 17:30:36,853] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 17:30:36,856] [INFO] [launch.py:253:main] process 928307 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,858] [INFO]
[launch.py:253:main] process 928308 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,860] [INFO] [launch.py:253:main] process 928309 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,862] [INFO] [launch.py:253:main] process 928310 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,864] [INFO] [launch.py:253:main] process 928311 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,867] [INFO] [launch.py:253:main] process 928312 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,869] [INFO] [launch.py:253:main] process 928313 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 17:30:36,871] [INFO] [launch.py:253:main] process 928314 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import]
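The eight spawn records above are identical except for the process id and the --local_rank argument (the pynvml FutureWarning they trigger, originally printed once per rank, is kept once here). A minimal sketch of how a deepspeed-style launcher produces such per-rank command lines (illustrative only, not DeepSpeed's actual launch.py; spawn_workers is a hypothetical helper):

import subprocess
import sys

def spawn_workers(train_argv, world_size=8):
    # One child process per GPU; argv is shared except for --local_rank=<rank>,
    # matching the "process ... spawned with command" lines above.
    procs = []
    for local_rank in range(world_size):
        cmd = [sys.executable, "-u", "tinyllava/train/train.py",
               f"--local_rank={local_rank}", *train_argv]
        procs.append(subprocess.Popen(cmd))
    return procs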
+[2025-10-13 17:30:43,451] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,649] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,822] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,852] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,861] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:43,901] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,907] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,935] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:43,939] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 17:30:44,068] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:44,068] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-13 17:30:44,228] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:44,257] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:44,309] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:44,309] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:44,340] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 17:30:44,355] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.1, 'temperature_mlp': 1.1, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.1, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 1.1, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 1.1, + "temperature_mlp": 1.1, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
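The config dump above requests soft masks (mask_type "soft") at temperature 1.1 for both the attention and MLP projections, and the launch arguments set init_mean 5.0. A minimal sketch of one way such a soft weight mask could be realized (SoftMaskedLinear is a hypothetical name, and sigmoid gating is an assumption about what "soft" means here, not the repo's verified implementation):

import torch
import torch.nn.functional as F

class SoftMaskedLinear(torch.nn.Module):  # hypothetical class, for illustration
    def __init__(self, in_features, out_features, init_mean=5.0, temperature=1.1):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(out_features, in_features) * 0.02)
        # Learnable mask scores start at init_mean: sigmoid(5.0 / 1.1) ~ 0.99,
        # so training begins with essentially all weights kept.
        self.scores = torch.nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask)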
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:928307:928307 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928307:928307 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928307:928307 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928307:928307 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928307:928307 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:928307:928307 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:928309:928309 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928309:928309 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928309:928309 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928309:928309 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928309:928309 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928309:928309 [2] NCCL INFO NET/Plugin: Using internal network plugin. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
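The init_distributed and NCCL bootstrap lines interleaved above come from each rank setting up its process group. A sketch of the usual pattern behind them (deepspeed.init_distributed is a real API; the scaffolding around it is illustrative):

import os
import torch
import deepspeed

local_rank = int(os.environ.get("LOCAL_RANK", "0"))
torch.cuda.set_device(local_rank)
# Produces the "Initializing TorchBackend in DeepSpeed with backend nccl" line.
deepspeed.init_distributed(dist_backend="nccl")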
+ywang29-vrdb-test1-worker-0:928312:928312 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928312:928312 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928312:928312 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928312:928312 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928312:928312 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928312:928312 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Using network Socket +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:928308:928308 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928308:928308 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928308:928308 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928308:928308 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928308:928308 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928308:928308 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:928314:928314 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928314:928314 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928314:928314 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928314:928314 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928314:928314 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928314:928314 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:928313:928313 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928313:928313 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928313:928313 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928313:928313 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928313:928313 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928313:928313 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:928311:928311 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928311:928311 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928311:928311 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928311:928311 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928311:928311 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928311:928311 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:928310:928310 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:928310:928310 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928310:928310 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928310:928310 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:928310:928310 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:928310:928310 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO ncclCommInitRank comm 0x563ce823c5f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO ncclCommInitRank comm 0x560e1ed7a200 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO ncclCommInitRank comm 0x55ad11de00b0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO ncclCommInitRank comm 0x561720487020 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO ncclCommInitRank comm 0x560e457232a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO ncclCommInitRank comm 0x5613bb7ed920 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO ncclCommInitRank comm 0x55cb2ec83b60 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO ncclCommInitRank comm 0x557fd0659690 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x21eee74a88ec61ad - Init START +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Setting affinity 
for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO comm 0x561720487020 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO comm 0x560e457232a0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO comm 0x5613bb7ed920 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO comm 0x55cb2ec83b60 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO comm 0x557fd0659690 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO comm 0x55ad11de00b0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO comm 0x560e1ed7a200 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO comm 0x563ce823c5f0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 
+ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
[... repeated NCCL INFO P2P channel setup lines elided: each of the 8 ranks connected channels 00/0 through 23/0 to its ring neighbors (rank i -> i+1 mod 8, then rank i -> i-1) via P2P/CUMEM/read and reported "Connected all rings" and "Connected all trees"; every rank also logged "threadThresholds 8/8/64 | 64/8/64 | 512 | 512" and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer" ...]
+ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928312:929892 [5] NCCL INFO ncclCommInitRank comm 0x55ad11de00b0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928314:929898 [7] NCCL INFO ncclCommInitRank comm 0x55cb2ec83b60 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928310:929901 [3] NCCL INFO ncclCommInitRank comm 0x563ce823c5f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928308:929897 [1] NCCL INFO ncclCommInitRank comm 0x560e457232a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928307:929878 [0] NCCL INFO ncclCommInitRank comm 0x5613bb7ed920 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928309:929879 [2] NCCL INFO ncclCommInitRank comm 0x561720487020 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928313:929899 [6] NCCL INFO ncclCommInitRank comm 0x557fd0659690 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x21eee74a88ec61ad - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:928311:929900 [4] NCCL INFO ncclCommInitRank comm 0x560e1ed7a200 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x21eee74a88ec61ad - Init COMPLETE
+[2025-10-13 17:31:30,767] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores',
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[... three further verbatim copies of the same "Some weights of Qwen2ForCausalLM were not initialized ..." warning and "You should probably TRAIN ..." notice elided ...]
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores',
'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-13 17:31:32,503] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
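The warning above (collapsed here from one identical copy per rank) fires because the pretrain checkpoint stores only the plain projection weights, while the masked model class registers an extra `scores` tensor per projection; those keys are absent from the checkpoint, so they are freshly initialized. A minimal sketch of the mechanism, with hypothetical class and variable names rather than the repo's actual code:

```python
# Why the loader reports `scores` as "newly initialized": the masked layer
# defines a parameter that no vanilla Linear checkpoint contains.
import torch
import torch.nn as nn

class MaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True):
        super().__init__(in_features, out_features, bias)
        # Extra mask-score parameter, absent from the pretrain checkpoint.
        self.scores = nn.Parameter(torch.full((out_features, in_features), 5.0))

layer = MaskedLinear(896, 896)
ckpt = nn.Linear(896, 896).state_dict()      # stand-in for the pretrain weights
result = layer.load_state_dict(ckpt, strict=False)
print(result.missing_keys)                   # ['scores'] -> "newly initialized"
```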
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
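Every q/k/v/o and MLP projection in the dump above, plus both connector layers, is a SupermaskLinearSparsity_SoftForward_Normal module; the config at the top of this log sets a soft mask type with temperature 1.3, and the init lines below report score means of 5.0. A minimal sketch of what such a soft-forward supermask layer plausibly computes; the forward body is an assumption, not the project's implementation:

```python
# Sketch: frozen weights, trainable per-weight scores, soft sigmoid mask.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias)
        self.temperature = temperature
        self.weight.requires_grad_(False)    # only the scores are tuned
        # Constant init at 5.0 matches the "Mean=5.000000" lines below;
        # sigmoid(5 / 1.3) ~= 0.98, so the mask starts out nearly dense.
        self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

q_proj = SoftSupermaskLinear(896, 896)
print(q_proj.scores.numel())  # 802816 = 896 * 896, as in the listing below
```

Per layer the score tensors add 2 * 802816 + 2 * 114688 + 3 * 4358144 = 14,909,440 trainable parameters; over 24 layers plus the two connector layers that is 24 * 14,909,440 + 1,032,192 + 802,816 = 359,661,568, matching the trainable-parameter total reported below.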
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000
+Pre-training init connector._connector.0.scores: Mean=5.000005
+Pre-training init connector._connector.2.scores: Mean=4.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
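The sampler draws a 10% subset of the 665,298 SFT examples (the message repeats once per rank and is collapsed above). A small sketch of the arithmetic, assuming a seeded uniform sample; the seed value is a guess taken from the `_42_` in the run name:

```python
# 10% uniform subsample: 665298 examples -> 66529 (floor of 66529.8).
import random

total = 665298
ratio = 0.10
rng = random.Random(42)                  # assumed seed, from the run name
subset = rng.sample(range(total), int(total * ratio))
print(len(subset))                       # 66529 == int(665298 * 0.1)
```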
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
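As a sanity check on the score counts above: each `.scores` tensor carries one mask score per weight entry, so the sizes are just products of the layer shapes printed earlier in this log (hidden 896, KV projection width 128 = 2 KV heads x 64, MLP intermediate 4864, SigLIP hidden 1152). A minimal sketch (shapes taken from the printed config; layer names abbreviated):

```python
# One score per weight entry: scores.numel() == in_features * out_features.
shapes = {
    "self_attn.q_proj": (896, 896),         # hidden -> hidden
    "self_attn.k_proj": (896, 128),         # hidden -> 2 KV heads * 64
    "self_attn.v_proj": (896, 128),
    "self_attn.o_proj": (896, 896),
    "mlp.gate_proj":    (896, 4864),        # hidden -> intermediate
    "mlp.up_proj":      (896, 4864),
    "mlp.down_proj":    (4864, 896),
    "connector._connector.0": (1152, 896),  # SigLIP hidden -> LLM hidden
    "connector._connector.2": (896, 896),
}
for name, (fan_in, fan_out) in shapes.items():
    # Prints 802816 / 114688 / 4358144 / 1032192, matching the log lines above.
    print(f"{name}: {fan_in * fan_out}")
```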
[~300 NCCL INFO setup lines condensed: 8 ranks (PIDs 928307-928314, GPUs 0-7) on ywang29-vrdb-test1-worker-0; 24 collective channels, each with ring order 0 1 2 3 4 5 6 7 and chain trees 1/-1/-1->0->-1 through -1/-1/-1->7->6; every peer link established via P2P/CUMEM/read; P2P chunksize 524288; all rings and trees connected; threadThresholds 8/8/64 | 64/8/64 | 512 | 512; per rank: 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer]
+ywang29-vrdb-test1-worker-0:928309:934896 [2] NCCL INFO ncclCommInitRank comm 0x7f598806afd0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928314:934894 [7] NCCL INFO ncclCommInitRank comm 0x7f21d006ab60 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928310:934898 [3] NCCL INFO ncclCommInitRank comm 0x7fb94c06b480 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928312:934895 [5] NCCL INFO ncclCommInitRank comm 0x7f789006b0b0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928308:934892 [1] NCCL INFO ncclCommInitRank comm 0x7f8ee006a950 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928311:934897 [4] NCCL INFO ncclCommInitRank comm 0x7f63e806ae10 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928313:934893 [6] NCCL INFO ncclCommInitRank comm 0x7f9ea406acc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xb83ba65bab90272b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:928307:934891 [0] NCCL INFO ncclCommInitRank comm 0x7f7ae006b190 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb83ba65bab90272b - Init COMPLETE
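A note on the tree notation summarized above: each per-rank `Trees` entry reads `child/-1/-1->rank->parent` (-1 = no link), so on every one of the 24 channels the ranks form the simple chain 0 -> 1 -> ... -> 7 rooted at rank 0, mirroring the ring order. A minimal parsing sketch (hypothetical helper, standard library only):

```python
import re

# Channel-0 entry of an NCCL "Trees" line, e.g. "Trees [0] 5/-1/-1->4->3":
# rank 4 receives from parent 3 and forwards to child 5.
TREE = re.compile(r"\[0\]\s+(-?\d+)/-?\d+/-?\d+->(-?\d+)->(-?\d+)")

def chain_link(trees_line: str) -> tuple[int, int, int]:
    """Return (rank, parent, child) for channel 0 of one Trees line."""
    child, rank, parent = map(int, TREE.search(trees_line).groups())
    return rank, parent, child

print(chain_link("NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3"))  # (4, 3, 5)
print(chain_link("NCCL INFO Trees [0] -1/-1/-1->7->6"))                   # (7, 6, -1)
```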
+  0%|          | 1/520 [00:14<2:04:08, 14.35s/it] {'loss': 2.0428, 'grad_norm': 0.006521646342616489, 'learning_rate': 0.0125, 'epoch': 0.0}
+  0%|          | 2/520 [00:18<1:10:25, 8.16s/it] {'loss': 2.0506, 'grad_norm': 0.007072428907969317, 'learning_rate': 0.025, 'epoch': 0.0}
+  1%|          | 3/520 [00:21<53:12, 6.18s/it] {'loss': 2.1869, 'grad_norm': 0.008095445701277314, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+  1%|          | 4/520 [00:25<44:42, 5.20s/it] {'loss': 2.0626, 'grad_norm': 0.006727490637415227, 'learning_rate': 0.05, 'epoch': 0.01}
+  1%|          | 5/520 [00:29<39:56, 4.65s/it] {'loss': 1.7863, 'grad_norm': 0.003631762924773762, 'learning_rate': 0.0625, 'epoch': 0.01}
+  1%|          | 6/520 [00:33<36:55, 4.31s/it] {'loss': 1.4354, 'grad_norm': 0.0011371111066154856, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+  1%|▏         | 7/520 [00:36<34:49, 4.07s/it] {'loss': 1.5256, 'grad_norm': 0.0013021314036459058, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+  2%|▏         | 8/520 [00:40<35:11, 4.12s/it] {'loss': 1.5393, 'grad_norm': 0.0009235322283100118, 'learning_rate': 0.1, 'epoch': 0.02}
+  2%|▏         | 9/520 [00:44<35:07, 4.12s/it] {'loss': 1.5836, 'grad_norm': 0.0007273089503953001, 'learning_rate': 0.1125, 'epoch': 0.02}
+  2%|▏         | 10/520 [00:48<34:09, 4.02s/it] {'loss': 1.4139, 'grad_norm': 0.0007779779184469213, 'learning_rate': 0.125, 'epoch': 0.02}
+  2%|▏         | 11/520 [00:52<33:35, 3.96s/it] {'loss': 1.4654, 'grad_norm': 0.0007091661133697961, 'learning_rate': 0.1375, 'epoch': 0.02}
+  2%|▏         | 12/520 [00:56<33:44, 3.99s/it] {'loss': 1.3419, 'grad_norm': 0.0005683652948637934, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-13 17:32:56,423] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
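The stage3.py warning above names its own mitigation. A minimal sketch of what that looks like in a generic DeepSpeed training loop; the loop structure and names (`model_engine`, `train_loader`, the flush interval) are hypothetical, only the `get_accelerator().empty_cache()` call comes from the warning itself:

```python
from deepspeed.accelerator import get_accelerator

# Hypothetical ZeRO-3 training loop. Flushing the allocator cache at the
# same step on every rank keeps the flushes synchronized, instead of each
# rank flushing mid-step under memory pressure as the warning describes.
for step, batch in enumerate(train_loader):   # train_loader: assumed dataloader
    loss = model_engine(batch)                # model_engine: DeepSpeed engine
    model_engine.backward(loss)
    model_engine.step()
    if step % 50 == 0:                        # arbitrary interval
        get_accelerator().empty_cache()       # same call on every rank
```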
+  2%|▎         | 13/520 [01:01<35:33, 4.21s/it] {'loss': 1.408, 'grad_norm': 0.0005400731524701191, 'learning_rate': 0.1625, 'epoch': 0.03}
+  3%|▎         | 14/520 [01:05<35:14, 4.18s/it] {'loss': 1.4463, 'grad_norm': 0.0006125358476896207, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+  3%|▎         | 15/520 [01:09<35:00, 4.16s/it] {'loss': 1.3709, 'grad_norm': 0.0005022173216883037, 'learning_rate': 0.1875, 'epoch': 0.03}
+  3%|▎         | 16/520 [01:13<34:42, 4.13s/it] {'loss': 1.3431, 'grad_norm': 0.0005936687985551389, 'learning_rate': 0.2, 'epoch': 0.03}
+  3%|▎         | 17/520 [01:17<34:33, 4.12s/it] {'loss': 1.4715, 'grad_norm': 0.0006381444494619854, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+  3%|▎         | 18/520 [01:21<34:04, 4.07s/it] {'loss': 1.3302, 'grad_norm': 0.0006505934948639159, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+  4%|▎         | 19/520 [01:25<33:25, 4.00s/it] {'loss': 1.3358, 'grad_norm': 0.0005714148668885572, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+  4%|▍         | 20/520 [01:29<32:55, 3.95s/it] {'loss': 1.3032, 'grad_norm': 0.0006114473264041549, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+  4%|▍         | 21/520 [01:33<32:31, 3.91s/it] {'loss': 1.3249, 'grad_norm': 0.0006212875643612719, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+  4%|▍         | 22/520 [01:36<31:48, 3.83s/it] {'loss': 1.4384, 'grad_norm': 0.0006081473605153059, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+  4%|▍         | 23/520 [01:40<31:13, 3.77s/it] {'loss': 1.3917, 'grad_norm': 0.0006766238387110909, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+  5%|▍         | 24/520 [01:44<31:26, 3.80s/it] {'loss': 1.3077, 'grad_norm': 0.0006204871042627594, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+  5%|▍         | 25/520 [01:48<31:29, 3.82s/it] {'loss': 1.3858, 'grad_norm': 0.0007053831031970914, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+  5%|▌         | 26/520 [01:52<32:03, 3.89s/it] {'loss': 1.3251, 'grad_norm': 0.0005122536626381898, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+  5%|▌         | 27/520 [01:56<32:31, 3.96s/it] {'loss': 1.255, 'grad_norm': 0.0005595671074264109, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+  5%|▌         | 28/520 [02:00<32:55, 4.01s/it] {'loss': 1.2884, 'grad_norm': 0.0006506607269128405, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+  6%|▌         | 29/520 [02:04<32:46, 4.00s/it] {'loss': 1.3077, 'grad_norm': 0.0006130846185546598, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+  6%|▌         | 30/520 [02:08<32:56, 4.03s/it] {'loss': 1.3712, 'grad_norm': 0.0005407463108569621, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+  6%|▌         | 31/520 [02:12<32:56, 4.04s/it] {'loss': 1.2694, 'grad_norm': 0.0005558171606052746, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:12<32:56, 4.04s/it] 6%|▌ | 32/520 [02:16<32:33, 4.00s/it] {'loss': 1.2049, 'grad_norm': 0.0006207068405234672, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:16<32:33, 4.00s/it] 6%|▋ | 33/520 [02:20<32:29, 4.00s/it] {'loss': 1.2667, 'grad_norm': 0.0006432131923846522, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:20<32:29, 4.00s/it] 7%|▋ | 34/520 [02:24<32:09, 3.97s/it] {'loss': 1.2592, 'grad_norm': 0.0006838329961048715, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:24<32:09, 3.97s/it] 7%|▋ | 35/520 [02:28<32:01, 3.96s/it] {'loss': 1.2707, 'grad_norm': 0.0007277424286492653, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:28<32:01, 3.96s/it] 7%|▋ | 36/520 [02:32<31:54, 3.96s/it] {'loss': 1.3619, 'grad_norm': 0.0006279732162398664, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:32<31:54, 3.96s/it] 7%|▋ | 37/520 [02:36<31:52, 3.96s/it] {'loss': 1.3452, 'grad_norm': 0.0005771410712617175, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:36<31:52, 3.96s/it] 7%|▋ | 38/520 [02:40<32:02, 3.99s/it] {'loss': 1.4333, 'grad_norm': 0.0006329402230663822, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:40<32:02, 3.99s/it] 8%|▊ | 39/520 [02:44<31:44, 3.96s/it] {'loss': 1.2977, 'grad_norm': 0.0007655576818878756, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:44<31:44, 3.96s/it] 8%|▊ | 40/520 [02:48<31:25, 3.93s/it] {'loss': 1.3277, 'grad_norm': 0.0005964782677435097, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:48<31:25, 3.93s/it] 8%|▊ | 41/520 [02:52<31:15, 3.92s/it] {'loss': 1.3074, 'grad_norm': 0.0006648041260157442, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:52<31:15, 3.92s/it] 8%|▊ | 42/520 [02:55<31:01, 3.89s/it] {'loss': 1.2987, 'grad_norm': 0.0008171191276909228, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:55<31:01, 3.89s/it] 8%|▊ | 43/520 [02:59<30:55, 3.89s/it] {'loss': 1.2361, 'grad_norm': 0.000580950480799821, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:59<30:55, 3.89s/it] 8%|▊ | 44/520 [03:03<30:44, 3.87s/it] {'loss': 1.3376, 'grad_norm': 0.0006581432424539014, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [03:03<30:44, 3.87s/it] 9%|▊ | 45/520 [03:07<30:30, 3.85s/it] {'loss': 1.3221, 'grad_norm': 0.0006972949068619798, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [03:07<30:30, 3.85s/it] 9%|▉ | 46/520 [03:11<30:19, 3.84s/it] {'loss': 1.377, 'grad_norm': 0.0006800450931779469, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:11<30:19, 3.84s/it] 9%|▉ | 47/520 [03:15<30:13, 3.83s/it] {'loss': 1.2929, 'grad_norm': 0.0007018992310036035, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:15<30:13, 3.83s/it] 9%|▉ | 48/520 [03:18<30:09, 3.83s/it] {'loss': 1.2965, 'grad_norm': 0.0008208267370128879, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:18<30:09, 3.83s/it] 9%|▉ | 49/520 [03:22<30:03, 3.83s/it] {'loss': 1.3269, 'grad_norm': 0.000748857063517952, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:22<30:03, 3.83s/it] 10%|▉ | 50/520 [03:26<29:59, 3.83s/it] {'loss': 1.3169, 'grad_norm': 0.0007046549269321437, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 
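The DeepSpeed warning above (at step 12) recommends adding get_accelerator().empty_cache() calls to the training loop when allocator cache flushes keep recurring. A minimal sketch of what that could look like, assuming a standard DeepSpeed engine loop — model_engine, loader, and EMPTY_CACHE_EVERY are hypothetical names for illustration, not from this repository:

    from deepspeed.accelerator import get_accelerator

    EMPTY_CACHE_EVERY = 50  # hypothetical interval; tune to observed memory pressure

    for step, batch in enumerate(loader, start=1):   # `loader`: placeholder DataLoader
        loss = model_engine(**batch).loss            # `model_engine`: a deepspeed.initialize()'d engine
        model_engine.backward(loss)                  # DeepSpeed engine API
        model_engine.step()
        if step % EMPTY_CACHE_EVERY == 0:
            # Every rank hits this branch at the same step, so all caches flush
            # together, which is what the warning above asks for.
            get_accelerator().empty_cache()

Flushing at a fixed step on every rank matters because a rank that flushes alone stalls the collective operations of the others.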
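The learning_rate column in this log is consistent with a linear warmup from 0.0125 to a peak of 0.2 over the first 16 steps, followed by half-cosine decay toward zero over the remaining 504 steps: the log shows 0.1866025403784439 at step 100 (0.1 * (1 + cos(pi/6))) and exactly 0.1 at step 268, the cosine midpoint. A small sketch, assuming an HF-style cosine schedule with warmup (an inference from the logged values, not code from this repository):

    import math

    TOTAL_STEPS = 520   # from the progress bar (n/520)
    WARMUP_STEPS = 16   # lr climbs linearly 0.0125 -> 0.2 over steps 1-16
    PEAK_LR = 0.2       # logged at step 16

    def lr_at(step: int) -> float:
        # Linear warmup, then half-cosine decay to 0.
        if step <= WARMUP_STEPS:
            return PEAK_LR * step / WARMUP_STEPS
        progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(lr_at(100))  # ~0.18660254, matching the logged value at step 100
    print(lr_at(268))  # 0.1 exactly (cosine midpoint), as logged at step 268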
50/520 [03:26<29:59, 3.83s/it] 10%|▉ | 51/520 [03:30<29:54, 3.83s/it] {'loss': 1.2575, 'grad_norm': 0.000774485255612285, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:30<29:54, 3.83s/it] 10%|█ | 52/520 [03:34<29:51, 3.83s/it] {'loss': 1.3842, 'grad_norm': 0.0008034938010311201, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:34<29:51, 3.83s/it] 10%|█ | 53/520 [03:37<29:45, 3.82s/it] {'loss': 1.3627, 'grad_norm': 0.0007525751405970801, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:37<29:45, 3.82s/it] 10%|█ | 54/520 [03:41<29:41, 3.82s/it] {'loss': 1.2985, 'grad_norm': 0.0007280265923179678, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:41<29:41, 3.82s/it] 11%|█ | 55/520 [03:45<29:36, 3.82s/it] {'loss': 1.2566, 'grad_norm': 0.0008137238724410735, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:45<29:36, 3.82s/it] 11%|█ | 56/520 [03:49<29:33, 3.82s/it] {'loss': 1.3812, 'grad_norm': 0.0007497588496231547, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:49<29:33, 3.82s/it] 11%|█ | 57/520 [03:53<29:27, 3.82s/it] {'loss': 1.2439, 'grad_norm': 0.0008369785918525196, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:53<29:27, 3.82s/it] 11%|█ | 58/520 [03:57<29:24, 3.82s/it] {'loss': 1.4007, 'grad_norm': 0.0006542978058439783, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:57<29:24, 3.82s/it] 11%|█▏ | 59/520 [04:00<29:22, 3.82s/it] {'loss': 1.2118, 'grad_norm': 0.0007143217099184133, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [04:00<29:22, 3.82s/it] 12%|█▏ | 60/520 [04:04<29:19, 3.82s/it] {'loss': 1.3031, 'grad_norm': 0.0006997117009165903, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [04:04<29:19, 3.82s/it] 12%|█▏ | 61/520 [04:08<29:14, 3.82s/it] {'loss': 1.2864, 'grad_norm': 0.000762489155290545, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:08<29:14, 3.82s/it] 12%|█▏ | 62/520 [04:12<29:11, 3.82s/it] {'loss': 1.291, 'grad_norm': 0.0008261205789101886, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:12<29:11, 3.82s/it] 12%|█▏ | 63/520 [04:16<29:05, 3.82s/it] {'loss': 1.2872, 'grad_norm': 0.0007459392677251845, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:16<29:05, 3.82s/it] 12%|█▏ | 64/520 [04:19<28:51, 3.80s/it] {'loss': 1.3088, 'grad_norm': 0.0007723014653892609, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:19<28:51, 3.80s/it] 12%|█▎ | 65/520 [04:23<28:28, 3.75s/it] {'loss': 1.3178, 'grad_norm': 0.0008760253407995272, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:23<28:28, 3.75s/it] 13%|█▎ | 66/520 [04:27<28:06, 3.72s/it] {'loss': 1.2684, 'grad_norm': 0.0007391583645173611, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:27<28:06, 3.72s/it] 13%|█▎ | 67/520 [04:30<27:51, 3.69s/it] {'loss': 1.1846, 'grad_norm': 0.0007740218498882824, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:30<27:51, 3.69s/it] 13%|█▎ | 68/520 [04:34<27:39, 3.67s/it] {'loss': 1.2486, 'grad_norm': 0.0007774554535808106, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:34<27:39, 3.67s/it] 13%|█▎ | 69/520 [04:38<27:31, 3.66s/it] {'loss': 1.2342, 'grad_norm': 0.0008289002191859177, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 
[04:38<27:31, 3.66s/it] 13%|█▎ | 70/520 [04:41<27:23, 3.65s/it] {'loss': 1.2555, 'grad_norm': 0.0008283895320747535, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:41<27:23, 3.65s/it] 14%|█▎ | 71/520 [04:45<27:14, 3.64s/it] {'loss': 1.1995, 'grad_norm': 0.0007299630464946806, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:45<27:14, 3.64s/it] 14%|█▍ | 72/520 [04:48<27:14, 3.65s/it] {'loss': 1.3485, 'grad_norm': 0.0007739555769506431, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:48<27:14, 3.65s/it] 14%|█▍ | 73/520 [04:52<27:09, 3.65s/it] {'loss': 1.1804, 'grad_norm': 0.0007960638002587046, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:52<27:09, 3.65s/it] 14%|█▍ | 74/520 [04:56<27:02, 3.64s/it] {'loss': 1.2801, 'grad_norm': 0.0008406791425960369, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:56<27:02, 3.64s/it] 14%|█▍ | 75/520 [04:59<26:59, 3.64s/it] {'loss': 1.2066, 'grad_norm': 0.0007315587566954246, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:59<26:59, 3.64s/it] 15%|█▍ | 76/520 [05:03<26:51, 3.63s/it] {'loss': 1.3314, 'grad_norm': 0.0006651210848508586, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [05:03<26:51, 3.63s/it] 15%|█▍ | 77/520 [05:07<26:46, 3.63s/it] {'loss': 1.1289, 'grad_norm': 0.0008797696485189686, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:07<26:46, 3.63s/it] 15%|█▌ | 78/520 [05:10<26:46, 3.63s/it] {'loss': 1.2384, 'grad_norm': 0.000789751535130501, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:10<26:46, 3.63s/it] 15%|█▌ | 79/520 [05:14<26:40, 3.63s/it] {'loss': 1.2285, 'grad_norm': 0.0007922474142361941, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:14<26:40, 3.63s/it] 15%|█▌ | 80/520 [05:17<26:34, 3.62s/it] {'loss': 1.3138, 'grad_norm': 0.0008047608637812174, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:17<26:34, 3.62s/it] 16%|█▌ | 81/520 [05:21<26:31, 3.63s/it] {'loss': 1.3626, 'grad_norm': 0.001009439092235259, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:21<26:31, 3.63s/it] 16%|█▌ | 82/520 [05:25<26:31, 3.63s/it] {'loss': 1.2927, 'grad_norm': 0.0007990137311315236, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:25<26:31, 3.63s/it] 16%|█▌ | 83/520 [05:28<26:37, 3.66s/it] {'loss': 1.3032, 'grad_norm': 0.0008683852475284486, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:28<26:37, 3.66s/it] 16%|█▌ | 84/520 [05:32<26:27, 3.64s/it] {'loss': 1.3171, 'grad_norm': 0.0008787282094895525, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:32<26:27, 3.64s/it] 16%|█▋ | 85/520 [05:36<26:19, 3.63s/it] {'loss': 1.3581, 'grad_norm': 0.0008235460434920135, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:36<26:19, 3.63s/it] 17%|█▋ | 86/520 [05:39<26:14, 3.63s/it] {'loss': 1.3475, 'grad_norm': 0.0008050537265472006, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:39<26:14, 3.63s/it] 17%|█▋ | 87/520 [05:43<26:11, 3.63s/it] {'loss': 1.2648, 'grad_norm': 0.0007752990262785964, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:43<26:11, 3.63s/it] 17%|█▋ | 88/520 [05:47<26:08, 3.63s/it] {'loss': 1.2071, 'grad_norm': 0.0006185632248189271, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 
17%|█▋ | 88/520 [05:47<26:08, 3.63s/it] 17%|█▋ | 89/520 [05:50<26:01, 3.62s/it] {'loss': 1.3046, 'grad_norm': 0.0008327460790939305, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:50<26:01, 3.62s/it] 17%|█▋ | 90/520 [05:54<25:57, 3.62s/it] {'loss': 1.2419, 'grad_norm': 0.0008194038833416649, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:54<25:57, 3.62s/it] 18%|█▊ | 91/520 [05:57<25:49, 3.61s/it] {'loss': 1.3086, 'grad_norm': 0.0007675398967669145, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:57<25:49, 3.61s/it] 18%|█▊ | 92/520 [06:01<25:42, 3.60s/it] {'loss': 1.2528, 'grad_norm': 0.0008660606588771448, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [06:01<25:42, 3.60s/it] 18%|█▊ | 93/520 [06:05<25:39, 3.61s/it] {'loss': 1.2555, 'grad_norm': 0.0008459356630463235, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:05<25:39, 3.61s/it] 18%|█▊ | 94/520 [06:08<25:38, 3.61s/it] {'loss': 1.3371, 'grad_norm': 0.0008062402537173276, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:08<25:38, 3.61s/it] 18%|█▊ | 95/520 [06:12<25:32, 3.61s/it] {'loss': 1.2399, 'grad_norm': 0.0009438335761205934, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:12<25:32, 3.61s/it] 18%|█▊ | 96/520 [06:15<25:28, 3.60s/it] {'loss': 1.2598, 'grad_norm': 0.000745231977542341, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:15<25:28, 3.60s/it] 19%|█▊ | 97/520 [06:19<25:26, 3.61s/it] {'loss': 1.2258, 'grad_norm': 0.0009320218161632855, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:19<25:26, 3.61s/it] 19%|█▉ | 98/520 [06:23<25:25, 3.61s/it] {'loss': 1.2307, 'grad_norm': 0.0007020047301365918, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:23<25:25, 3.61s/it] 19%|█▉ | 99/520 [06:26<25:21, 3.61s/it] {'loss': 1.2318, 'grad_norm': 0.0008660801670010897, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:26<25:21, 3.61s/it] 19%|█▉ | 100/520 [06:30<25:15, 3.61s/it] {'loss': 1.2116, 'grad_norm': 0.0007332155787644599, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:30<25:15, 3.61s/it] 19%|█▉ | 101/520 [06:33<25:10, 3.60s/it] {'loss': 1.2512, 'grad_norm': 0.0008322242359028254, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:33<25:10, 3.60s/it] 20%|█▉ | 102/520 [06:37<25:04, 3.60s/it] {'loss': 1.2528, 'grad_norm': 0.0008547742229764394, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:37<25:04, 3.60s/it] 20%|█▉ | 103/520 [06:41<25:00, 3.60s/it] {'loss': 1.1828, 'grad_norm': 0.0007364151354005231, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:41<25:00, 3.60s/it] 20%|██ | 104/520 [06:44<24:57, 3.60s/it] {'loss': 1.2589, 'grad_norm': 0.0008335189891827943, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:44<24:57, 3.60s/it] 20%|██ | 105/520 [06:48<24:58, 3.61s/it] {'loss': 1.2494, 'grad_norm': 0.0007966264102802755, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:48<24:58, 3.61s/it] 20%|██ | 106/520 [06:51<24:54, 3.61s/it] {'loss': 1.2397, 'grad_norm': 0.0007296195122094549, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:51<24:54, 3.61s/it] 21%|██ | 107/520 [06:55<24:54, 3.62s/it] {'loss': 1.2133, 'grad_norm': 0.0007646929016938297, 'learning_rate': 
0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:55<24:54, 3.62s/it] 21%|██ | 108/520 [06:59<24:51, 3.62s/it] {'loss': 1.2075, 'grad_norm': 0.000858550237965603, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:59<24:51, 3.62s/it] 21%|██ | 109/520 [07:02<24:49, 3.62s/it] {'loss': 1.1917, 'grad_norm': 0.0007031524640892834, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:02<24:49, 3.62s/it] 21%|██ | 110/520 [07:06<25:06, 3.67s/it] {'loss': 1.3837, 'grad_norm': 0.000841475282537919, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:06<25:06, 3.67s/it] 21%|██▏ | 111/520 [07:10<25:16, 3.71s/it] {'loss': 1.3777, 'grad_norm': 0.0009001315833469796, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:10<25:16, 3.71s/it] 22%|██▏ | 112/520 [07:14<25:19, 3.72s/it] {'loss': 1.268, 'grad_norm': 0.0007918707646675798, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:14<25:19, 3.72s/it] 22%|██▏ | 113/520 [07:17<25:21, 3.74s/it] {'loss': 1.1666, 'grad_norm': 0.000780370636317029, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:18<25:21, 3.74s/it] 22%|██▏ | 114/520 [07:21<25:28, 3.77s/it] {'loss': 1.2602, 'grad_norm': 0.0007922211218908618, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:21<25:28, 3.77s/it] 22%|██▏ | 115/520 [07:25<25:26, 3.77s/it] {'loss': 1.3497, 'grad_norm': 0.0007948389426421495, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:25<25:26, 3.77s/it] 22%|██▏ | 116/520 [07:29<25:26, 3.78s/it] {'loss': 1.3639, 'grad_norm': 0.0007605849457547856, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:29<25:26, 3.78s/it] 22%|██▎ | 117/520 [07:33<25:24, 3.78s/it] {'loss': 1.3236, 'grad_norm': 0.0008316029876315014, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:33<25:24, 3.78s/it] 23%|██▎ | 118/520 [07:36<25:19, 3.78s/it] {'loss': 1.2551, 'grad_norm': 0.0007667639528571978, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:36<25:19, 3.78s/it] 23%|██▎ | 119/520 [07:40<25:19, 3.79s/it] {'loss': 1.2129, 'grad_norm': 0.0008372312895804127, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:40<25:19, 3.79s/it] 23%|██▎ | 120/520 [07:44<25:17, 3.79s/it] {'loss': 1.2205, 'grad_norm': 0.0008968228484587874, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:44<25:17, 3.79s/it] 23%|██▎ | 121/520 [07:48<25:15, 3.80s/it] {'loss': 1.2665, 'grad_norm': 0.0008444894101954239, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:48<25:15, 3.80s/it] 23%|██▎ | 122/520 [07:52<25:15, 3.81s/it] {'loss': 1.1873, 'grad_norm': 0.0007618623417400323, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:52<25:15, 3.81s/it] 24%|██▎ | 123/520 [07:56<25:14, 3.81s/it] {'loss': 1.2824, 'grad_norm': 0.0007753023199520166, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:56<25:14, 3.81s/it] 24%|██▍ | 124/520 [07:59<25:09, 3.81s/it] {'loss': 1.2384, 'grad_norm': 0.0008851650496277719, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:59<25:09, 3.81s/it] 24%|██▍ | 125/520 [08:03<25:12, 3.83s/it] {'loss': 1.2368, 'grad_norm': 0.0007923684354123103, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:03<25:12, 3.83s/it] 24%|██▍ | 126/520 [08:08<26:28, 
4.03s/it] {'loss': 1.2173, 'grad_norm': 0.0007031587247619558, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:08<26:28, 4.03s/it] 24%|██▍ | 127/520 [08:12<25:54, 3.96s/it] {'loss': 1.2194, 'grad_norm': 0.0009433274507011795, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:12<25:54, 3.96s/it] 25%|██▍ | 128/520 [08:15<25:30, 3.90s/it] {'loss': 1.2573, 'grad_norm': 0.0008386504129217873, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:15<25:30, 3.90s/it] 25%|██▍ | 129/520 [08:19<25:10, 3.86s/it] {'loss': 1.2278, 'grad_norm': 0.0007510439167993705, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:19<25:10, 3.86s/it] 25%|██▌ | 130/520 [08:23<24:38, 3.79s/it] {'loss': 1.2492, 'grad_norm': 0.000747384849746854, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:23<24:38, 3.79s/it] 25%|██▌ | 131/520 [08:26<24:15, 3.74s/it] {'loss': 1.1815, 'grad_norm': 0.0007138282979034655, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:26<24:15, 3.74s/it] 25%|██▌ | 132/520 [08:30<24:04, 3.72s/it] {'loss': 1.2994, 'grad_norm': 0.0009075623708498735, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:30<24:04, 3.72s/it] 26%|██▌ | 133/520 [08:34<23:51, 3.70s/it] {'loss': 1.2192, 'grad_norm': 0.0008998683407648695, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:34<23:51, 3.70s/it] 26%|██▌ | 134/520 [08:37<23:39, 3.68s/it] {'loss': 1.2934, 'grad_norm': 0.0008136309688468585, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:37<23:39, 3.68s/it] 26%|██▌ | 135/520 [08:41<23:30, 3.66s/it] {'loss': 1.3405, 'grad_norm': 0.0008166479196071737, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:41<23:30, 3.66s/it] 26%|██▌ | 136/520 [08:44<23:19, 3.65s/it] {'loss': 1.2903, 'grad_norm': 0.000806187373149868, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:44<23:19, 3.65s/it] 26%|██▋ | 137/520 [08:48<23:12, 3.64s/it] {'loss': 1.2096, 'grad_norm': 0.0009219563603752919, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:48<23:12, 3.64s/it] 27%|██▋ | 138/520 [08:52<23:09, 3.64s/it] {'loss': 1.2227, 'grad_norm': 0.0007460942126078928, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:52<23:09, 3.64s/it] 27%|██▋ | 139/520 [08:56<23:26, 3.69s/it] {'loss': 1.0934, 'grad_norm': 0.0007180425183659773, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:56<23:26, 3.69s/it] 27%|██▋ | 140/520 [08:59<23:43, 3.75s/it] {'loss': 1.2349, 'grad_norm': 0.0007250903052794618, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:59<23:43, 3.75s/it] 27%|██▋ | 141/520 [09:03<23:49, 3.77s/it] {'loss': 1.3211, 'grad_norm': 0.0007806049112475205, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:03<23:49, 3.77s/it] 27%|██▋ | 142/520 [09:07<23:50, 3.79s/it] {'loss': 1.2403, 'grad_norm': 0.000759745423354367, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:07<23:50, 3.79s/it] 28%|██▊ | 143/520 [09:11<23:53, 3.80s/it] {'loss': 1.2515, 'grad_norm': 0.0008604628708878362, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:11<23:53, 3.80s/it] 28%|██▊ | 144/520 [09:15<23:54, 3.82s/it] {'loss': 1.2237, 'grad_norm': 0.0008607308370949633, 'learning_rate': 0.1698236818086073, 
'epoch': 0.28} + 28%|██▊ | 144/520 [09:15<23:54, 3.82s/it] 28%|██▊ | 145/520 [09:19<23:56, 3.83s/it] {'loss': 1.1527, 'grad_norm': 0.0007493684966474729, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:19<23:56, 3.83s/it] 28%|██▊ | 146/520 [09:22<23:54, 3.83s/it] {'loss': 1.2895, 'grad_norm': 0.0007997224960199048, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:22<23:54, 3.83s/it] 28%|██▊ | 147/520 [09:26<23:50, 3.84s/it] {'loss': 1.1995, 'grad_norm': 0.0008161603792398267, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:26<23:50, 3.84s/it] 28%|██▊ | 148/520 [09:30<23:47, 3.84s/it] {'loss': 1.2212, 'grad_norm': 0.0007719414026425807, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:30<23:47, 3.84s/it] 29%|██▊ | 149/520 [09:34<23:51, 3.86s/it] {'loss': 1.1659, 'grad_norm': 0.0007926271211317305, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:34<23:51, 3.86s/it] 29%|██▉ | 150/520 [09:38<23:45, 3.85s/it] {'loss': 1.3803, 'grad_norm': 0.0008039391350292068, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:38<23:45, 3.85s/it] 29%|██▉ | 151/520 [09:42<23:43, 3.86s/it] {'loss': 1.2129, 'grad_norm': 0.000826496185123914, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:42<23:43, 3.86s/it] 29%|██▉ | 152/520 [09:46<23:31, 3.84s/it] {'loss': 1.1823, 'grad_norm': 0.0008304260042841759, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:46<23:31, 3.84s/it] 29%|██▉ | 153/520 [09:49<23:03, 3.77s/it] {'loss': 1.2166, 'grad_norm': 0.0008017222369841718, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:49<23:03, 3.77s/it] 30%|██▉ | 154/520 [09:53<22:43, 3.73s/it] {'loss': 1.2967, 'grad_norm': 0.0007795692734615272, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:53<22:43, 3.73s/it] 30%|██▉ | 155/520 [09:56<22:26, 3.69s/it] {'loss': 1.2113, 'grad_norm': 0.0008283430761170001, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:56<22:26, 3.69s/it] 30%|███ | 156/520 [10:00<22:18, 3.68s/it] {'loss': 1.2342, 'grad_norm': 0.0008979547953809341, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [10:00<22:18, 3.68s/it] 30%|███ | 157/520 [10:04<22:10, 3.67s/it] {'loss': 1.282, 'grad_norm': 0.0007605696284158392, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [10:04<22:10, 3.67s/it] 30%|███ | 158/520 [10:07<22:02, 3.65s/it] {'loss': 1.2166, 'grad_norm': 0.0008019972831189796, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:07<22:02, 3.65s/it] 31%|███ | 159/520 [10:11<21:58, 3.65s/it] {'loss': 1.2666, 'grad_norm': 0.0008011141368667899, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:11<21:58, 3.65s/it] 31%|███ | 160/520 [10:15<21:52, 3.65s/it] {'loss': 1.2693, 'grad_norm': 0.000835225237921599, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:15<21:52, 3.65s/it] 31%|███ | 161/520 [10:18<21:44, 3.63s/it] {'loss': 1.2465, 'grad_norm': 0.000799617530880977, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:18<21:44, 3.63s/it] 31%|███ | 162/520 [10:22<21:41, 3.64s/it] {'loss': 1.2273, 'grad_norm': 0.000753552513332703, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:22<21:41, 3.64s/it] 31%|███▏ | 163/520 [10:25<21:36, 3.63s/it] {'loss': 
1.1492, 'grad_norm': 0.0008952348417767574, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:25<21:36, 3.63s/it] 32%|███▏ | 164/520 [10:29<21:36, 3.64s/it] {'loss': 1.1105, 'grad_norm': 0.0007538623909672317, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:29<21:36, 3.64s/it] 32%|███▏ | 165/520 [10:33<21:33, 3.64s/it] {'loss': 1.2651, 'grad_norm': 0.0007799419560415845, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:33<21:33, 3.64s/it] 32%|███▏ | 166/520 [10:36<21:28, 3.64s/it] {'loss': 1.2254, 'grad_norm': 0.0008580004884686078, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:36<21:28, 3.64s/it] 32%|███▏ | 167/520 [10:40<21:29, 3.65s/it] {'loss': 1.2206, 'grad_norm': 0.000771448126011137, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:40<21:29, 3.65s/it] 32%|███▏ | 168/520 [10:44<21:23, 3.65s/it] {'loss': 1.166, 'grad_norm': 0.00075771752171489, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:44<21:23, 3.65s/it] 32%|███▎ | 169/520 [10:47<21:22, 3.65s/it] {'loss': 1.2409, 'grad_norm': 0.000777867071391893, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:47<21:22, 3.65s/it] 33%|███▎ | 170/520 [10:51<21:26, 3.68s/it] {'loss': 1.1812, 'grad_norm': 0.0006613733701088239, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:51<21:26, 3.68s/it] 33%|███▎ | 171/520 [10:55<21:17, 3.66s/it] {'loss': 1.1758, 'grad_norm': 0.0008314138817870329, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:55<21:17, 3.66s/it] 33%|███▎ | 172/520 [10:58<21:10, 3.65s/it] {'loss': 1.2505, 'grad_norm': 0.0007618436122820655, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:58<21:10, 3.65s/it] 33%|███▎ | 173/520 [11:02<21:03, 3.64s/it] {'loss': 1.1882, 'grad_norm': 0.0007731845089008815, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:02<21:03, 3.64s/it] 33%|███▎ | 174/520 [11:06<20:58, 3.64s/it] {'loss': 1.2383, 'grad_norm': 0.0008069991112292596, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:06<20:58, 3.64s/it] 34%|███▎ | 175/520 [11:09<20:56, 3.64s/it] {'loss': 1.1565, 'grad_norm': 0.0007310155580599093, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:09<20:56, 3.64s/it] 34%|███▍ | 176/520 [11:13<20:51, 3.64s/it] {'loss': 1.2567, 'grad_norm': 0.0007761816712162395, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:13<20:51, 3.64s/it] 34%|███▍ | 177/520 [11:17<20:45, 3.63s/it] {'loss': 1.1363, 'grad_norm': 0.0007538278392962676, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:17<20:45, 3.63s/it] 34%|███▍ | 178/520 [11:20<20:42, 3.63s/it] {'loss': 1.2233, 'grad_norm': 0.0008499750461993885, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:20<20:42, 3.63s/it] 34%|███▍ | 179/520 [11:24<20:35, 3.62s/it] {'loss': 1.2963, 'grad_norm': 0.0007540126121534567, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:24<20:35, 3.62s/it] 35%|███▍ | 180/520 [11:27<20:33, 3.63s/it] {'loss': 1.2165, 'grad_norm': 0.0008069489159490405, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:27<20:33, 3.63s/it] 35%|███▍ | 181/520 [11:31<20:31, 3.63s/it] {'loss': 1.1957, 'grad_norm': 0.0006948049583228637, 'learning_rate': 
0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:31<20:31, 3.63s/it] 35%|███▌ | 182/520 [11:35<20:30, 3.64s/it] {'loss': 1.2068, 'grad_norm': 0.0008217066214999825, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:35<20:30, 3.64s/it] 35%|███▌ | 183/520 [11:38<20:28, 3.65s/it] {'loss': 1.2272, 'grad_norm': 0.0007670933016701431, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:38<20:28, 3.65s/it] 35%|███▌ | 184/520 [11:42<20:25, 3.65s/it] {'loss': 1.1675, 'grad_norm': 0.0008459660333890718, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:42<20:25, 3.65s/it] 36%|███▌ | 185/520 [11:46<20:20, 3.64s/it] {'loss': 1.2963, 'grad_norm': 0.0007928039516556791, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:46<20:20, 3.64s/it] 36%|███▌ | 186/520 [11:49<20:17, 3.64s/it] {'loss': 1.1877, 'grad_norm': 0.0008130924216607893, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:49<20:17, 3.64s/it] 36%|███▌ | 187/520 [11:53<20:10, 3.64s/it] {'loss': 1.184, 'grad_norm': 0.0008844459994246056, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:53<20:10, 3.64s/it] 36%|███▌ | 188/520 [11:57<20:06, 3.63s/it] {'loss': 1.2767, 'grad_norm': 0.0008237312445496705, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:57<20:06, 3.63s/it] 36%|███▋ | 189/520 [12:00<20:04, 3.64s/it] {'loss': 1.2728, 'grad_norm': 0.0007344044619216577, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:00<20:04, 3.64s/it] 37%|███▋ | 190/520 [12:04<19:58, 3.63s/it] {'loss': 1.1989, 'grad_norm': 0.0008362370643193479, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:04<19:58, 3.63s/it] 37%|███▋ | 191/520 [12:07<19:51, 3.62s/it] {'loss': 1.1617, 'grad_norm': 0.0007511945694277316, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:07<19:51, 3.62s/it] 37%|███▋ | 192/520 [12:11<19:49, 3.63s/it] {'loss': 1.237, 'grad_norm': 0.0007402867317735603, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:11<19:49, 3.63s/it] 37%|███▋ | 193/520 [12:15<19:45, 3.62s/it] {'loss': 1.1856, 'grad_norm': 0.0008382135201607184, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:15<19:45, 3.62s/it] 37%|███▋ | 194/520 [12:18<19:41, 3.62s/it] {'loss': 1.0859, 'grad_norm': 0.0006839326907608066, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:18<19:41, 3.62s/it] 38%|███▊ | 195/520 [12:22<19:37, 3.62s/it] {'loss': 1.2578, 'grad_norm': 0.0007825912730814061, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:22<19:37, 3.62s/it] 38%|███▊ | 196/520 [12:25<19:30, 3.61s/it] {'loss': 1.2373, 'grad_norm': 0.0008795785305657441, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:25<19:30, 3.61s/it] 38%|███▊ | 197/520 [12:29<19:31, 3.63s/it] {'loss': 1.1833, 'grad_norm': 0.0007941338434030918, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:29<19:31, 3.63s/it] 38%|███▊ | 198/520 [12:33<19:27, 3.62s/it] {'loss': 1.2491, 'grad_norm': 0.0008466431488977221, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:33<19:27, 3.62s/it] 38%|███▊ | 199/520 [12:36<19:24, 3.63s/it] {'loss': 1.1707, 'grad_norm': 0.000802955398477392, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:36<19:24, 
3.63s/it] 38%|███▊ | 200/520 [12:40<19:19, 3.62s/it] {'loss': 1.1434, 'grad_norm': 0.0007765988624250352, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:40<19:19, 3.62s/it] 39%|███▊ | 201/520 [12:44<19:16, 3.63s/it] {'loss': 1.1635, 'grad_norm': 0.0006923708465657243, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:44<19:16, 3.63s/it] 39%|███▉ | 202/520 [12:47<19:31, 3.68s/it] {'loss': 1.1779, 'grad_norm': 0.0008024486049843177, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:47<19:31, 3.68s/it] 39%|███▉ | 203/520 [12:51<19:43, 3.73s/it] {'loss': 1.2237, 'grad_norm': 0.0008321294173753746, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:51<19:43, 3.73s/it] 39%|███▉ | 204/520 [12:55<19:50, 3.77s/it] {'loss': 1.2373, 'grad_norm': 0.0008156446518684679, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:55<19:50, 3.77s/it] 39%|███▉ | 205/520 [12:59<19:53, 3.79s/it] {'loss': 1.1633, 'grad_norm': 0.000744368247019453, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:59<19:53, 3.79s/it] 40%|███▉ | 206/520 [13:03<19:53, 3.80s/it] {'loss': 1.2681, 'grad_norm': 0.0007736374598004208, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:03<19:53, 3.80s/it] 40%|███▉ | 207/520 [13:07<19:51, 3.81s/it] {'loss': 1.1307, 'grad_norm': 0.0006823136080314888, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:07<19:51, 3.81s/it] 40%|████ | 208/520 [13:10<19:28, 3.75s/it] {'loss': 1.2651, 'grad_norm': 0.0008840556226360319, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:10<19:28, 3.75s/it] 40%|████ | 209/520 [13:14<19:12, 3.71s/it] {'loss': 1.1785, 'grad_norm': 0.0007556827305390971, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:14<19:12, 3.71s/it] 40%|████ | 210/520 [13:17<18:58, 3.67s/it] {'loss': 1.2442, 'grad_norm': 0.0007971213367732376, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:17<18:58, 3.67s/it] 41%|████ | 211/520 [13:21<18:50, 3.66s/it] {'loss': 1.246, 'grad_norm': 0.000735768395109605, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:21<18:50, 3.66s/it] 41%|████ | 212/520 [13:25<18:41, 3.64s/it] {'loss': 1.2456, 'grad_norm': 0.0007651241104232333, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:25<18:41, 3.64s/it] 41%|████ | 213/520 [13:28<18:47, 3.67s/it] {'loss': 1.1946, 'grad_norm': 0.0008725192609533904, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:28<18:47, 3.67s/it] 41%|████ | 214/520 [13:32<19:01, 3.73s/it] {'loss': 1.1857, 'grad_norm': 0.0008171169494608911, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:32<19:01, 3.73s/it] 41%|████▏ | 215/520 [13:36<19:08, 3.77s/it] {'loss': 1.1002, 'grad_norm': 0.0007434570064369771, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:36<19:08, 3.77s/it] 42%|████▏ | 216/520 [13:40<19:15, 3.80s/it] {'loss': 1.1128, 'grad_norm': 0.0007933971974089238, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:40<19:15, 3.80s/it] 42%|████▏ | 217/520 [13:44<19:15, 3.81s/it] {'loss': 1.2422, 'grad_norm': 0.0008356450858206227, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:44<19:15, 3.81s/it] 42%|████▏ | 218/520 [13:48<19:14, 3.82s/it] {'loss': 1.2162, 
'grad_norm': 0.0008773811137672098, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:48<19:14, 3.82s/it] 42%|████▏ | 219/520 [13:52<19:15, 3.84s/it] {'loss': 1.2273, 'grad_norm': 0.0007226018968768723, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:52<19:15, 3.84s/it] 42%|████▏ | 220/520 [13:55<19:10, 3.84s/it] {'loss': 1.1478, 'grad_norm': 0.0007261294513039737, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:55<19:10, 3.84s/it] 42%|████▎ | 221/520 [13:59<19:09, 3.84s/it] {'loss': 1.2239, 'grad_norm': 0.0007805297854649592, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:59<19:09, 3.84s/it] 43%|████▎ | 222/520 [14:03<19:06, 3.85s/it] {'loss': 1.1691, 'grad_norm': 0.0007855043771553457, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:03<19:06, 3.85s/it] 43%|████▎ | 223/520 [14:07<19:06, 3.86s/it] {'loss': 1.1638, 'grad_norm': 0.0007790413940848663, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:07<19:06, 3.86s/it] 43%|████▎ | 224/520 [14:11<19:03, 3.86s/it] {'loss': 1.1982, 'grad_norm': 0.000688920250579134, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:11<19:03, 3.86s/it] 43%|████▎ | 225/520 [14:15<18:58, 3.86s/it] {'loss': 1.1634, 'grad_norm': 0.0007856030213162751, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:15<18:58, 3.86s/it] 43%|████▎ | 226/520 [14:19<18:52, 3.85s/it] {'loss': 1.2621, 'grad_norm': 0.0007635784087502477, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:19<18:52, 3.85s/it] 44%|████▎ | 227/520 [14:22<18:50, 3.86s/it] {'loss': 1.2482, 'grad_norm': 0.0007560866931441594, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:22<18:50, 3.86s/it] 44%|████▍ | 228/520 [14:26<18:45, 3.86s/it] {'loss': 1.2444, 'grad_norm': 0.0007865523812851236, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:26<18:45, 3.86s/it] 44%|████▍ | 229/520 [14:30<18:46, 3.87s/it] {'loss': 1.222, 'grad_norm': 0.0007365633140998465, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:30<18:46, 3.87s/it] 44%|████▍ | 230/520 [14:34<18:40, 3.86s/it] {'loss': 1.1149, 'grad_norm': 0.0007545172425012789, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:34<18:40, 3.86s/it] 44%|████▍ | 231/520 [14:38<18:38, 3.87s/it] {'loss': 1.1792, 'grad_norm': 0.0007258982511755947, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:38<18:38, 3.87s/it] 45%|████▍ | 232/520 [14:42<18:34, 3.87s/it] {'loss': 1.2788, 'grad_norm': 0.0007981627310252492, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:42<18:34, 3.87s/it] 45%|████▍ | 233/520 [14:46<18:29, 3.87s/it] {'loss': 1.1704, 'grad_norm': 0.0008049410033673957, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:46<18:29, 3.87s/it] 45%|████▌ | 234/520 [14:50<18:22, 3.85s/it] {'loss': 1.1371, 'grad_norm': 0.0008237987229253948, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:50<18:22, 3.85s/it] 45%|████▌ | 235/520 [14:53<18:20, 3.86s/it] {'loss': 1.1854, 'grad_norm': 0.0007842429472972422, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:53<18:20, 3.86s/it] 45%|████▌ | 236/520 [14:57<18:01, 3.81s/it] {'loss': 1.2507, 'grad_norm': 
0.0007355612203715916, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:57<18:01, 3.81s/it] 46%|████▌ | 237/520 [15:01<17:48, 3.78s/it] {'loss': 1.2587, 'grad_norm': 0.0007762069838896443, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:01<17:48, 3.78s/it] 46%|████▌ | 238/520 [15:04<17:32, 3.73s/it] {'loss': 1.1889, 'grad_norm': 0.0007978358081127201, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:04<17:32, 3.73s/it] 46%|████▌ | 239/520 [15:08<17:21, 3.71s/it] {'loss': 1.2563, 'grad_norm': 0.0008114294109900866, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:08<17:21, 3.71s/it] 46%|████▌ | 240/520 [15:12<17:13, 3.69s/it] {'loss': 1.0821, 'grad_norm': 0.0007219739511032741, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:12<17:13, 3.69s/it] 46%|████▋ | 241/520 [15:15<17:06, 3.68s/it] {'loss': 1.165, 'grad_norm': 0.0007773211432266102, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:15<17:06, 3.68s/it] 47%|████▋ | 242/520 [15:19<16:56, 3.66s/it] {'loss': 1.1728, 'grad_norm': 0.0007431800857281891, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:19<16:56, 3.66s/it] 47%|████▋ | 243/520 [15:23<16:51, 3.65s/it] {'loss': 1.1706, 'grad_norm': 0.0007877657949442372, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:23<16:51, 3.65s/it] 47%|████▋ | 244/520 [15:26<16:43, 3.64s/it] {'loss': 1.2702, 'grad_norm': 0.0007819515416689795, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:26<16:43, 3.64s/it] 47%|████▋ | 245/520 [15:30<16:40, 3.64s/it] {'loss': 1.1494, 'grad_norm': 0.0007939398073451061, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:30<16:40, 3.64s/it] 47%|████▋ | 246/520 [15:33<16:37, 3.64s/it] {'loss': 1.2716, 'grad_norm': 0.0007750230086058441, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:33<16:37, 3.64s/it] 48%|████▊ | 247/520 [15:37<16:31, 3.63s/it] {'loss': 1.3248, 'grad_norm': 0.0008195153691288585, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:37<16:31, 3.63s/it] 48%|████▊ | 248/520 [15:41<16:26, 3.63s/it] {'loss': 1.1458, 'grad_norm': 0.0007784048932712975, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:41<16:26, 3.63s/it] 48%|████▊ | 249/520 [15:44<16:21, 3.62s/it] {'loss': 1.2339, 'grad_norm': 0.0007744274728265433, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:44<16:21, 3.62s/it] 48%|████▊ | 250/520 [15:48<16:19, 3.63s/it] {'loss': 1.1807, 'grad_norm': 0.0008214861517981275, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:48<16:19, 3.63s/it] 48%|████▊ | 251/520 [15:52<16:16, 3.63s/it] {'loss': 1.2447, 'grad_norm': 0.0007271351779952475, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:52<16:16, 3.63s/it] 48%|████▊ | 252/520 [15:55<16:15, 3.64s/it] {'loss': 1.1749, 'grad_norm': 0.0007270340307503484, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:55<16:15, 3.64s/it] 49%|████▊ | 253/520 [15:59<16:13, 3.65s/it] {'loss': 1.2343, 'grad_norm': 0.0008621913451009947, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:59<16:13, 3.65s/it] 49%|████▉ | 254/520 [16:03<16:10, 3.65s/it] {'loss': 1.1794, 'grad_norm': 0.0007493417800564571, 
'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:03<16:10, 3.65s/it] 49%|████▉ | 255/520 [16:06<16:06, 3.65s/it] {'loss': 1.1797, 'grad_norm': 0.0008349728151261812, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:06<16:06, 3.65s/it] 49%|████▉ | 256/520 [16:10<16:01, 3.64s/it] {'loss': 1.2348, 'grad_norm': 0.0008267060819077563, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:10<16:01, 3.64s/it] 49%|████▉ | 257/520 [16:13<15:56, 3.64s/it] {'loss': 1.215, 'grad_norm': 0.0008049424785793861, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:13<15:56, 3.64s/it] 50%|████▉ | 258/520 [16:17<15:50, 3.63s/it] {'loss': 1.2172, 'grad_norm': 0.0007028738005676862, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:17<15:50, 3.63s/it] 50%|████▉ | 259/520 [16:21<15:45, 3.62s/it] {'loss': 1.2955, 'grad_norm': 0.0008874079183014398, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:21<15:45, 3.62s/it] 50%|█████ | 260/520 [16:24<15:41, 3.62s/it] {'loss': 1.2175, 'grad_norm': 0.0006486132439007768, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:24<15:41, 3.62s/it] 50%|█████ | 261/520 [16:28<15:41, 3.64s/it] {'loss': 1.1664, 'grad_norm': 0.0007617338522333074, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:28<15:41, 3.64s/it] 50%|█████ | 262/520 [16:32<15:38, 3.64s/it] {'loss': 1.1572, 'grad_norm': 0.0007854104591428809, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:32<15:38, 3.64s/it] 51%|█████ | 263/520 [16:35<15:31, 3.63s/it] {'loss': 1.1911, 'grad_norm': 0.0007691249510194783, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:35<15:31, 3.63s/it] 51%|█████ | 264/520 [16:39<15:29, 3.63s/it] {'loss': 1.2441, 'grad_norm': 0.0007597130633618716, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:39<15:29, 3.63s/it] 51%|█████ | 265/520 [16:43<15:26, 3.63s/it] {'loss': 1.1665, 'grad_norm': 0.0008651673327273338, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:43<15:26, 3.63s/it] 51%|█████ | 266/520 [16:46<15:20, 3.62s/it] {'loss': 1.0515, 'grad_norm': 0.0006931893520709799, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:46<15:20, 3.62s/it] 51%|█████▏ | 267/520 [16:50<15:22, 3.65s/it] {'loss': 1.1653, 'grad_norm': 0.0007508986982517307, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:50<15:22, 3.65s/it] 52%|█████▏ | 268/520 [16:53<15:16, 3.64s/it] {'loss': 1.2841, 'grad_norm': 0.0007823345454203218, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:53<15:16, 3.64s/it] 52%|█████▏ | 269/520 [16:57<15:11, 3.63s/it] {'loss': 1.2683, 'grad_norm': 0.0008243763307342844, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:57<15:11, 3.63s/it] 52%|█████▏ | 270/520 [17:01<15:11, 3.65s/it] {'loss': 1.1282, 'grad_norm': 0.0007372684654839141, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:01<15:11, 3.65s/it] 52%|█████▏ | 271/520 [17:04<15:06, 3.64s/it] {'loss': 1.2454, 'grad_norm': 0.000802877309202303, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:04<15:06, 3.64s/it] 52%|█████▏ | 272/520 [17:08<15:04, 3.65s/it] {'loss': 1.1403, 'grad_norm': 0.0007358666285195648, 'learning_rate': 0.09750693082619273, 
'epoch': 0.52} + 52%|█████▏ | 272/520 [17:08<15:04, 3.65s/it] 52%|█████▎ | 273/520 [17:12<15:00, 3.65s/it] {'loss': 1.2471, 'grad_norm': 0.0007404331839090412, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:12<15:00, 3.65s/it] 53%|█████▎ | 274/520 [17:15<14:54, 3.64s/it] {'loss': 1.2308, 'grad_norm': 0.0008244706049190011, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:15<14:54, 3.64s/it] 53%|█████▎ | 275/520 [17:19<14:55, 3.65s/it] {'loss': 1.177, 'grad_norm': 0.0007911971975237495, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:19<14:55, 3.65s/it] 53%|█████▎ | 276/520 [17:23<14:51, 3.65s/it] {'loss': 1.2353, 'grad_norm': 0.0008406915343854384, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:23<14:51, 3.65s/it] 53%|█████▎ | 277/520 [17:26<14:48, 3.66s/it] {'loss': 1.2585, 'grad_norm': 0.0007090565747908382, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:26<14:48, 3.66s/it] 53%|█████▎ | 278/520 [17:30<14:43, 3.65s/it] {'loss': 1.1325, 'grad_norm': 0.0007174375601534513, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:30<14:43, 3.65s/it] 54%|█████▎ | 279/520 [17:34<14:37, 3.64s/it] {'loss': 1.1335, 'grad_norm': 0.0007877163895267802, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:34<14:37, 3.64s/it] 54%|█████▍ | 280/520 [17:37<14:34, 3.65s/it] {'loss': 1.1644, 'grad_norm': 0.0008677835052413562, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:37<14:34, 3.65s/it] 54%|█████▍ | 281/520 [17:41<14:30, 3.64s/it] {'loss': 1.2681, 'grad_norm': 0.0008090317122498083, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:41<14:30, 3.64s/it] 54%|█████▍ | 282/520 [17:45<14:31, 3.66s/it] {'loss': 1.1432, 'grad_norm': 0.0007416230209276969, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:45<14:31, 3.66s/it] 54%|█████▍ | 283/520 [17:48<14:26, 3.66s/it] {'loss': 1.2781, 'grad_norm': 0.0008526378490201962, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:48<14:26, 3.66s/it] 55%|█████▍ | 284/520 [17:52<14:21, 3.65s/it] {'loss': 1.1419, 'grad_norm': 0.0008058377135521275, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:52<14:21, 3.65s/it] 55%|█████▍ | 285/520 [17:55<14:14, 3.64s/it] {'loss': 1.1688, 'grad_norm': 0.0007884045936983124, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:55<14:14, 3.64s/it] 55%|█████▌ | 286/520 [17:59<14:10, 3.64s/it] {'loss': 1.0537, 'grad_norm': 0.0007773565116096354, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:59<14:10, 3.64s/it] 55%|█████▌ | 287/520 [18:03<14:08, 3.64s/it] {'loss': 1.2768, 'grad_norm': 0.0007720769843524317, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:03<14:08, 3.64s/it] 55%|█████▌ | 288/520 [18:06<14:04, 3.64s/it] {'loss': 1.2984, 'grad_norm': 0.0007335791315368667, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:06<14:04, 3.64s/it] 56%|█████▌ | 289/520 [18:10<14:02, 3.65s/it] {'loss': 1.1836, 'grad_norm': 0.0007470710082708558, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:10<14:02, 3.65s/it] 56%|█████▌ | 290/520 [18:14<13:57, 3.64s/it] {'loss': 1.1094, 'grad_norm': 0.0007162268572200699, 'learning_rate': 
0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:14<13:57, 3.64s/it] 56%|█████▌ | 291/520 [18:17<13:54, 3.64s/it] {'loss': 1.1503, 'grad_norm': 0.000761754388250863, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:17<13:54, 3.64s/it] 56%|█████▌ | 292/520 [18:21<13:53, 3.65s/it] {'loss': 1.2022, 'grad_norm': 0.0007726207987503785, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:21<13:53, 3.65s/it] 56%|█████▋ | 293/520 [18:25<13:48, 3.65s/it] {'loss': 1.1551, 'grad_norm': 0.0008188488702609322, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:25<13:48, 3.65s/it] 57%|█████▋ | 294/520 [18:28<13:43, 3.64s/it] {'loss': 1.1745, 'grad_norm': 0.0008413385243112787, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:28<13:43, 3.64s/it] 57%|█████▋ | 295/520 [18:32<13:39, 3.64s/it] {'loss': 1.1738, 'grad_norm': 0.0007092268584676746, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:32<13:39, 3.64s/it] 57%|█████▋ | 296/520 [18:35<13:33, 3.63s/it] {'loss': 1.1264, 'grad_norm': 0.0007991425443228463, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:35<13:33, 3.63s/it] 57%|█████▋ | 297/520 [18:39<13:34, 3.65s/it] {'loss': 1.2538, 'grad_norm': 0.0008497147000412976, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:39<13:34, 3.65s/it] 57%|█████▋ | 298/520 [18:43<13:30, 3.65s/it] {'loss': 1.2151, 'grad_norm': 0.0007379904970814644, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:43<13:30, 3.65s/it] 57%|█████▊ | 299/520 [18:46<13:26, 3.65s/it] {'loss': 1.2202, 'grad_norm': 0.0007379637980560925, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:46<13:26, 3.65s/it] 58%|█████▊ | 300/520 [18:50<13:23, 3.65s/it] {'loss': 1.2631, 'grad_norm': 0.0007896282671045941, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:50<13:23, 3.65s/it] 58%|█████▊ | 301/520 [18:54<13:20, 3.65s/it] {'loss': 1.2515, 'grad_norm': 0.0007932505093675099, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:54<13:20, 3.65s/it] 58%|█████▊ | 302/520 [18:57<13:17, 3.66s/it] {'loss': 1.2249, 'grad_norm': 0.0007400040685617055, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:57<13:17, 3.66s/it] 58%|█████▊ | 303/520 [19:01<13:14, 3.66s/it] {'loss': 1.171, 'grad_norm': 0.000859916035653268, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:01<13:14, 3.66s/it] 58%|█████▊ | 304/520 [19:05<13:11, 3.66s/it] {'loss': 1.1377, 'grad_norm': 0.0007928652442831335, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:05<13:11, 3.66s/it] 59%|█████▊ | 305/520 [19:08<13:08, 3.67s/it] {'loss': 1.2761, 'grad_norm': 0.0009213603210859024, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:08<13:08, 3.67s/it] 59%|█████▉ | 306/520 [19:12<13:05, 3.67s/it] {'loss': 1.2211, 'grad_norm': 0.00079368448580183, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:12<13:05, 3.67s/it] 59%|█████▉ | 307/520 [19:16<13:22, 3.77s/it] {'loss': 1.1634, 'grad_norm': 0.0007523970510090796, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:16<13:22, 3.77s/it] 59%|█████▉ | 308/520 [19:20<13:12, 3.74s/it] {'loss': 1.274, 'grad_norm': 0.0007455821808657276, 
'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:20<13:12, 3.74s/it] 59%|█████▉ | 309/520 [19:23<13:02, 3.71s/it] {'loss': 1.1667, 'grad_norm': 0.0007667097477414786, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:23<13:02, 3.71s/it] 60%|█████▉ | 310/520 [19:27<12:57, 3.70s/it] {'loss': 1.1437, 'grad_norm': 0.0007853900082972548, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:27<12:57, 3.70s/it] 60%|█████▉ | 311/520 [19:31<12:50, 3.69s/it] {'loss': 1.1273, 'grad_norm': 0.0007730910687616883, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:31<12:50, 3.69s/it] 60%|██████ | 312/520 [19:34<12:42, 3.67s/it] {'loss': 1.1172, 'grad_norm': 0.0007785620129728391, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:34<12:42, 3.67s/it] 60%|██████ | 313/520 [19:38<12:37, 3.66s/it] {'loss': 1.0997, 'grad_norm': 0.0007100861145240794, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:38<12:37, 3.66s/it] 60%|██████ | 314/520 [19:42<12:55, 3.77s/it] {'loss': 1.1371, 'grad_norm': 0.0007558545322710976, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:42<12:55, 3.77s/it] 61%|██████ | 315/520 [19:46<12:43, 3.72s/it] {'loss': 1.1864, 'grad_norm': 0.0008426353847678352, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:46<12:43, 3.72s/it] 61%|██████ | 316/520 [19:50<13:00, 3.82s/it] {'loss': 1.122, 'grad_norm': 0.0007996933018555158, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:50<13:00, 3.82s/it] 61%|██████ | 317/520 [19:53<12:42, 3.76s/it] {'loss': 1.127, 'grad_norm': 0.0006961293827146655, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:53<12:42, 3.76s/it] 61%|██████ | 318/520 [19:57<12:32, 3.72s/it] {'loss': 1.2359, 'grad_norm': 0.0008510365106748479, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:57<12:32, 3.72s/it] 61%|██████▏ | 319/520 [20:01<12:44, 3.80s/it] {'loss': 1.1202, 'grad_norm': 0.0007074558644459585, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:01<12:44, 3.80s/it] 62%|██████▏ | 320/520 [20:05<12:28, 3.74s/it] {'loss': 1.0668, 'grad_norm': 0.0007519454292508126, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:05<12:28, 3.74s/it] 62%|██████▏ | 321/520 [20:08<12:17, 3.71s/it] {'loss': 1.2611, 'grad_norm': 0.0007643535252698953, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:08<12:17, 3.71s/it] 62%|██████▏ | 322/520 [20:12<12:10, 3.69s/it] {'loss': 1.0871, 'grad_norm': 0.0007373946193439029, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:12<12:10, 3.69s/it] 62%|██████▏ | 323/520 [20:15<12:03, 3.67s/it] {'loss': 1.1538, 'grad_norm': 0.0007715249303679414, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:15<12:03, 3.67s/it] 62%|██████▏ | 324/520 [20:19<11:56, 3.66s/it] {'loss': 1.2041, 'grad_norm': 0.0007910725935130454, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:19<11:56, 3.66s/it] 62%|██████▎ | 325/520 [20:23<11:52, 3.65s/it] {'loss': 1.2031, 'grad_norm': 0.0008616430635876275, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:23<11:52, 3.65s/it] 63%|██████▎ | 326/520 [20:26<11:46, 3.64s/it] {'loss': 1.1991, 'grad_norm': 
0.0008243221924099037, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:26<11:46, 3.64s/it] 63%|██████▎ | 327/520 [20:30<11:50, 3.68s/it] {'loss': 1.1882, 'grad_norm': 0.0007911688125181221, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:30<11:50, 3.68s/it] 63%|██████▎ | 328/520 [20:34<11:57, 3.73s/it] {'loss': 1.2408, 'grad_norm': 0.000806475865689869, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:34<11:57, 3.73s/it] 63%|██████▎ | 329/520 [20:38<12:01, 3.78s/it] {'loss': 1.1226, 'grad_norm': 0.0006825158383889773, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:38<12:01, 3.78s/it] 63%|██████▎ | 330/520 [20:42<12:02, 3.80s/it] {'loss': 1.1961, 'grad_norm': 0.0007180352690639996, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:42<12:02, 3.80s/it] 64%|██████▎ | 331/520 [20:46<12:00, 3.81s/it] {'loss': 1.1599, 'grad_norm': 0.0007926784457421072, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:46<12:00, 3.81s/it] 64%|██████▍ | 332/520 [20:49<12:00, 3.83s/it] {'loss': 1.2179, 'grad_norm': 0.0007103399267363475, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:49<12:00, 3.83s/it] 64%|██████▍ | 333/520 [20:53<11:58, 3.84s/it] {'loss': 1.2919, 'grad_norm': 0.0008332183457423156, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:53<11:58, 3.84s/it] 64%|██████▍ | 334/520 [20:57<11:40, 3.77s/it] {'loss': 1.203, 'grad_norm': 0.0008247804997518019, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:57<11:40, 3.77s/it] 64%|██████▍ | 335/520 [21:01<11:28, 3.72s/it] {'loss': 1.2025, 'grad_norm': 0.0007425443816819182, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:01<11:28, 3.72s/it] 65%|██████▍ | 336/520 [21:04<11:19, 3.69s/it] {'loss': 1.108, 'grad_norm': 0.000827204822292347, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:04<11:19, 3.69s/it] 65%|██████▍ | 337/520 [21:08<11:12, 3.67s/it] {'loss': 1.101, 'grad_norm': 0.0007593497378554627, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:08<11:12, 3.67s/it] 65%|██████▌ | 338/520 [21:11<11:04, 3.65s/it] {'loss': 1.2101, 'grad_norm': 0.0007825666442680058, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:11<11:04, 3.65s/it] 65%|██████▌ | 339/520 [21:15<10:58, 3.64s/it] {'loss': 1.1548, 'grad_norm': 0.000794220021015743, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:15<10:58, 3.64s/it] 65%|██████▌ | 340/520 [21:19<10:54, 3.64s/it] {'loss': 1.1413, 'grad_norm': 0.0007389785636259213, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:19<10:54, 3.64s/it] 66%|██████▌ | 341/520 [21:22<10:49, 3.63s/it] {'loss': 1.1693, 'grad_norm': 0.0008063238844312088, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:22<10:49, 3.63s/it] 66%|██████▌ | 342/520 [21:26<10:44, 3.62s/it] {'loss': 1.1918, 'grad_norm': 0.0008895678523973348, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:26<10:44, 3.62s/it] 66%|██████▌ | 343/520 [21:29<10:42, 3.63s/it] {'loss': 1.1435, 'grad_norm': 0.0006387614717856761, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:29<10:42, 3.63s/it] 66%|██████▌ | 344/520 
[21:33<10:39, 3.63s/it] {'loss': 1.1264, 'grad_norm': 0.0007009348104723412, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:33<10:39, 3.63s/it] 66%|██████▋ | 345/520 [21:37<10:35, 3.63s/it] {'loss': 1.2284, 'grad_norm': 0.0007863154497699819, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:37<10:35, 3.63s/it] 67%|██████▋ | 346/520 [21:40<10:31, 3.63s/it] {'loss': 1.1633, 'grad_norm': 0.0007488942686000888, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:40<10:31, 3.63s/it] 67%|██████▋ | 347/520 [21:44<10:27, 3.63s/it] {'loss': 1.143, 'grad_norm': 0.000701038005326683, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:44<10:27, 3.63s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:48<10:23, 3.62s/it] {'loss': 1.1024, 'grad_norm': 0.0009153911257827188, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:48<10:23, 3.62s/it] 67%|██████▋ | 349/520 [21:51<10:20, 3.63s/it] {'loss': 1.1389, 'grad_norm': 0.0007732937985908581, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:51<10:20, 3.63s/it] 67%|██████▋ | 350/520 [21:55<10:16, 3.63s/it] {'loss': 1.1819, 'grad_norm': 0.0007928702541825009, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:55<10:16, 3.63s/it] 68%|██████▊ | 351/520 [21:58<10:12, 3.63s/it] {'loss': 1.0935, 'grad_norm': 0.0007244637902521213, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [21:58<10:12, 3.63s/it] 68%|██████▊ | 352/520 [22:02<10:08, 3.62s/it] {'loss': 1.2077, 'grad_norm': 0.0007291706009588128, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:02<10:08, 3.62s/it] 68%|██████▊ | 353/520 [22:06<10:05, 3.63s/it] {'loss': 1.1312, 'grad_norm': 0.0006359431546969584, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:06<10:05, 3.63s/it] 68%|██████▊ | 354/520 [22:09<10:02, 3.63s/it] {'loss': 1.2309, 'grad_norm': 0.00072034653828787, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:09<10:02, 3.63s/it] 68%|██████▊ | 355/520 [22:13<10:00, 3.64s/it] {'loss': 1.1514, 'grad_norm': 0.0007612872394489098, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:13<10:00, 3.64s/it] 68%|██████▊ | 356/520 [22:17<10:00, 3.66s/it] {'loss': 1.1536, 'grad_norm': 0.0007840389676075111, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:17<10:00, 3.66s/it] 69%|██████▊ | 357/520 [22:20<09:56, 3.66s/it] {'loss': 1.1877, 'grad_norm': 0.0007292516332178026, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:20<09:56, 3.66s/it] 69%|██████▉ | 358/520 [22:24<09:51, 3.65s/it] {'loss': 1.1182, 'grad_norm': 0.0007827966340004188, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:24<09:51, 3.65s/it] 69%|██████▉ | 359/520 [22:28<09:48, 3.65s/it] {'loss': 1.1698, 'grad_norm': 0.00075528152833084, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:28<09:48, 3.65s/it] 69%|██████▉ | 360/520 [22:31<09:43, 3.65s/it] {'loss': 1.1785, 'grad_norm': 0.00076692602238027, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:31<09:43, 
3.65s/it] 69%|██████▉ | 361/520 [22:35<09:40, 3.65s/it] {'loss': 1.1917, 'grad_norm': 0.0006705135673081567, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:35<09:40, 3.65s/it] 70%|██████▉ | 362/520 [22:39<09:36, 3.65s/it] {'loss': 1.1678, 'grad_norm': 0.0008177525432692623, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:39<09:36, 3.65s/it] 70%|██████▉ | 363/520 [22:42<09:33, 3.65s/it] {'loss': 1.1952, 'grad_norm': 0.0007767281765820665, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:42<09:33, 3.65s/it] 70%|███████ | 364/520 [22:46<09:29, 3.65s/it] {'loss': 1.2094, 'grad_norm': 0.0007710038205303176, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:46<09:29, 3.65s/it] 70%|███████ | 365/520 [22:50<09:27, 3.66s/it] {'loss': 1.2465, 'grad_norm': 0.0008009831068315441, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:50<09:27, 3.66s/it] 70%|███████ | 366/520 [22:53<09:23, 3.66s/it] {'loss': 1.2079, 'grad_norm': 0.0007369797477477356, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [22:53<09:23, 3.66s/it] 71%|███████ | 367/520 [22:57<09:18, 3.65s/it] {'loss': 1.2071, 'grad_norm': 0.0007748084794074458, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [22:57<09:18, 3.65s/it] 71%|███████ | 368/520 [23:01<09:14, 3.65s/it] {'loss': 1.0635, 'grad_norm': 0.0007624498064560316, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:01<09:14, 3.65s/it] 71%|███████ | 369/520 [23:04<09:10, 3.65s/it] {'loss': 1.1678, 'grad_norm': 0.0006794198963445708, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:04<09:10, 3.65s/it] 71%|███████ | 370/520 [23:08<09:06, 3.64s/it] {'loss': 1.1257, 'grad_norm': 0.0007353375076834589, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:08<09:06, 3.64s/it] 71%|███████▏ | 371/520 [23:11<09:02, 3.64s/it] {'loss': 1.1174, 'grad_norm': 0.0008059208031421778, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:11<09:02, 3.64s/it] 72%|███████▏ | 372/520 [23:15<08:58, 3.64s/it] {'loss': 1.2397, 'grad_norm': 0.0006874259660111801, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:15<08:58, 3.64s/it] 72%|███████▏ | 373/520 [23:19<08:55, 3.64s/it] {'loss': 1.126, 'grad_norm': 0.000808111350446269, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:19<08:55, 3.64s/it] 72%|███████▏ | 374/520 [23:22<08:51, 3.64s/it] {'loss': 1.2092, 'grad_norm': 0.00078647991557552, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:22<08:51, 3.64s/it] 72%|███████▏ | 375/520 [23:26<08:49, 3.65s/it] {'loss': 1.127, 'grad_norm': 0.000749842060958536, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:26<08:49, 3.65s/it] 72%|███████▏ | 376/520 [23:30<08:44, 3.64s/it] {'loss': 1.2328, 'grad_norm': 0.0007337435447240606, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:30<08:44, 3.64s/it] 72%|███████▎ | 377/520 [23:33<08:43, 3.66s/it] {'loss': 1.1643, 'grad_norm': 0.0008403175566797746, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:33<08:43, 3.66s/it] 73%|███████▎ | 378/520 [23:37<08:39, 3.66s/it] {'loss': 1.2274, 'grad_norm': 0.0007323839732008883, 'learning_rate': 
0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:37<08:39, 3.66s/it] 73%|███████▎ | 379/520 [23:41<08:34, 3.65s/it] {'loss': 1.1977, 'grad_norm': 0.0007194575210439787, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:41<08:34, 3.65s/it] 73%|███████▎ | 380/520 [23:44<08:29, 3.64s/it] {'loss': 1.2156, 'grad_norm': 0.0007727856048585256, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:44<08:29, 3.64s/it] 73%|███████▎ | 381/520 [23:48<08:25, 3.63s/it] {'loss': 1.2044, 'grad_norm': 0.000725516293009888, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:48<08:25, 3.63s/it] 73%|███████▎ | 382/520 [23:52<08:30, 3.70s/it] {'loss': 1.1836, 'grad_norm': 0.0007093975672917272, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:52<08:30, 3.70s/it] 74%|███████▎ | 383/520 [23:56<08:31, 3.73s/it] {'loss': 1.0472, 'grad_norm': 0.0008192675377829189, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:56<08:31, 3.73s/it] 74%|███████▍ | 384/520 [23:59<08:31, 3.76s/it] {'loss': 1.2139, 'grad_norm': 0.0006596597746481928, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [23:59<08:31, 3.76s/it] 74%|███████▍ | 385/520 [24:03<08:30, 3.78s/it] {'loss': 1.185, 'grad_norm': 0.000707576897262189, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:03<08:30, 3.78s/it] 74%|███████▍ | 386/520 [24:07<08:29, 3.80s/it] {'loss': 1.1392, 'grad_norm': 0.0006579152208126464, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:07<08:29, 3.80s/it] 74%|███████▍ | 387/520 [24:11<08:26, 3.81s/it] {'loss': 1.2393, 'grad_norm': 0.0007399414729049071, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:11<08:26, 3.81s/it] 75%|███████▍ | 388/520 [24:15<08:23, 3.82s/it] {'loss': 1.0996, 'grad_norm': 0.0007338600736533077, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:15<08:23, 3.82s/it] 75%|███████▍ | 389/520 [24:19<08:23, 3.84s/it] {'loss': 1.1432, 'grad_norm': 0.0008572756526704977, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:19<08:23, 3.84s/it] 75%|███████▌ | 390/520 [24:22<08:19, 3.84s/it] {'loss': 1.2094, 'grad_norm': 0.0007349388975827815, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:22<08:19, 3.84s/it] 75%|███████▌ | 391/520 [24:26<08:15, 3.84s/it] {'loss': 1.2743, 'grad_norm': 0.0007719351461225342, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:26<08:15, 3.84s/it] 75%|███████▌ | 392/520 [24:30<08:11, 3.84s/it] {'loss': 1.0993, 'grad_norm': 0.0007346189556990484, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:30<08:11, 3.84s/it] 76%|███████▌ | 393/520 [24:34<08:06, 3.83s/it] {'loss': 1.0922, 'grad_norm': 0.0006516616286104961, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:34<08:06, 3.83s/it] 76%|███████▌ | 394/520 [24:38<08:02, 3.83s/it] {'loss': 1.1668, 'grad_norm': 0.0008061080117258709, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:38<08:02, 3.83s/it] 76%|███████▌ | 395/520 [24:42<07:58, 3.83s/it] {'loss': 1.1329, 'grad_norm': 0.0008131792767792341, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:42<07:58, 3.83s/it] 76%|███████▌ | 396/520 
[24:45<07:53, 3.82s/it] {'loss': 1.2135, 'grad_norm': 0.0008099270186714321, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:45<07:53, 3.82s/it] 76%|███████▋ | 397/520 [24:49<07:50, 3.83s/it] {'loss': 1.1849, 'grad_norm': 0.0007485010760733122, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:49<07:50, 3.83s/it] 77%|███████▋ | 398/520 [24:53<07:46, 3.83s/it] {'loss': 1.1857, 'grad_norm': 0.0008015259819592186, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:53<07:46, 3.83s/it] 77%|███████▋ | 399/520 [24:57<07:43, 3.83s/it] {'loss': 1.1277, 'grad_norm': 0.0007193576549842724, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [24:57<07:43, 3.83s/it] 77%|███████▋ | 400/520 [25:01<07:40, 3.84s/it] {'loss': 1.1594, 'grad_norm': 0.000687958842048251, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:01<07:40, 3.84s/it] 77%|███████▋ | 401/520 [25:05<07:37, 3.84s/it] {'loss': 1.0228, 'grad_norm': 0.000828748276580897, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:05<07:37, 3.84s/it] 77%|███████▋ | 402/520 [25:09<07:34, 3.85s/it] {'loss': 1.1503, 'grad_norm': 0.0007767301910225717, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:09<07:34, 3.85s/it] 78%|███████▊ | 403/520 [25:12<07:30, 3.85s/it] {'loss': 1.1709, 'grad_norm': 0.0008247018094949323, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:12<07:30, 3.85s/it] 78%|███████▊ | 404/520 [25:16<07:24, 3.83s/it] {'loss': 1.0857, 'grad_norm': 0.000878659358511879, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:16<07:24, 3.83s/it] 78%|███████▊ | 405/520 [25:20<07:20, 3.83s/it] {'loss': 1.1401, 'grad_norm': 0.0007403187828652638, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:20<07:20, 3.83s/it] 78%|███████▊ | 406/520 [25:24<07:16, 3.83s/it] {'loss': 1.0603, 'grad_norm': 0.0008775301334346362, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:24<07:16, 3.83s/it] 78%|███████▊ | 407/520 [25:28<07:13, 3.83s/it] {'loss': 1.2528, 'grad_norm': 0.0007913893357576924, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:28<07:13, 3.83s/it] 78%|███████▊ | 408/520 [25:31<07:08, 3.83s/it] {'loss': 1.1637, 'grad_norm': 0.0008281188708878117, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:31<07:08, 3.83s/it] 79%|███████▊ | 409/520 [25:35<07:04, 3.83s/it] {'loss': 1.2795, 'grad_norm': 0.0008135902809161524, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:35<07:04, 3.83s/it] 79%|███████▉ | 410/520 [25:39<07:01, 3.83s/it] {'loss': 1.0255, 'grad_norm': 0.0007700659034238151, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:39<07:01, 3.83s/it] 79%|███████▉ | 411/520 [25:43<06:57, 3.83s/it] {'loss': 1.2606, 'grad_norm': 0.0008237266274258579, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:43<06:57, 3.83s/it] 79%|███████▉ | 412/520 [25:47<06:59, 3.88s/it] {'loss': 1.1705, 'grad_norm': 0.0007616610886510091, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:47<06:59, 3.88s/it] 79%|███████▉ | 413/520 [25:51<06:55, 3.88s/it] {'loss': 1.156, 'grad_norm': 0.0008226530210740326, 'learning_rate': 0.021429732071612653, 
'epoch': 0.79} + 79%|███████▉ | 413/520 [25:51<06:55, 3.88s/it] 80%|███████▉ | 414/520 [25:55<06:50, 3.87s/it] {'loss': 0.9669, 'grad_norm': 0.0006282043401732801, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:55<06:50, 3.87s/it] 80%|███████▉ | 415/520 [25:59<06:45, 3.87s/it] {'loss': 1.1536, 'grad_norm': 0.0007252593739856877, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [25:59<06:45, 3.87s/it] 80%|████████ | 416/520 [26:02<06:35, 3.81s/it] {'loss': 1.0593, 'grad_norm': 0.0008287072802403531, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:02<06:35, 3.81s/it] 80%|████████ | 417/520 [26:06<06:26, 3.75s/it] {'loss': 1.2224, 'grad_norm': 0.0007660893792232519, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:06<06:26, 3.75s/it] 80%|████████ | 418/520 [26:09<06:19, 3.72s/it] {'loss': 1.2164, 'grad_norm': 0.0007243352456310566, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:09<06:19, 3.72s/it] 81%|████████ | 419/520 [26:13<06:11, 3.68s/it] {'loss': 1.2069, 'grad_norm': 0.0008370959201882314, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:13<06:11, 3.68s/it] 81%|████████ | 420/520 [26:17<06:06, 3.66s/it] {'loss': 1.0999, 'grad_norm': 0.0008213786730481642, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:17<06:06, 3.66s/it] 81%|████████ | 421/520 [26:20<06:01, 3.65s/it] {'loss': 1.0352, 'grad_norm': 0.000796492923606667, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:20<06:01, 3.65s/it] 81%|████████ | 422/520 [26:24<05:56, 3.64s/it] {'loss': 1.1579, 'grad_norm': 0.0007928919597200367, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:24<05:56, 3.64s/it] 81%|████████▏ | 423/520 [26:28<05:52, 3.63s/it] {'loss': 1.1266, 'grad_norm': 0.0008265460376129695, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:28<05:52, 3.63s/it] 82%|████████▏ | 424/520 [26:31<05:48, 3.63s/it] {'loss': 1.2425, 'grad_norm': 0.0007179779685000766, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:31<05:48, 3.63s/it] 82%|████████▏ | 425/520 [26:35<05:44, 3.62s/it] {'loss': 1.1481, 'grad_norm': 0.0007581856942977702, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:35<05:44, 3.62s/it] 82%|████████▏ | 426/520 [26:38<05:41, 3.63s/it] {'loss': 1.1756, 'grad_norm': 0.0009899386199012216, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:38<05:41, 3.63s/it] 82%|████████▏ | 427/520 [26:42<05:37, 3.63s/it] {'loss': 1.0821, 'grad_norm': 0.0007504931868151989, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:42<05:37, 3.63s/it] 82%|████████▏ | 428/520 [26:46<05:32, 3.62s/it] {'loss': 1.0693, 'grad_norm': 0.0008257066631227697, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:46<05:32, 3.62s/it] 82%|████████▎ | 429/520 [26:49<05:29, 3.62s/it] {'loss': 1.1645, 'grad_norm': 0.0007789679676834777, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:49<05:29, 3.62s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [26:53<05:25, 3.62s/it] {'loss': 1.1632, 'grad_norm': 0.0007259196191882298, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [26:53<05:25, 3.62s/it] 83%|████████▎ | 431/520 [26:56<05:21, 3.61s/it] {'loss': 1.1292, 'grad_norm': 0.0007477486304579058, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [26:56<05:21, 3.61s/it] 83%|████████▎ | 432/520 [27:00<05:18, 3.62s/it] {'loss': 1.0739, 'grad_norm': 0.000783027113703602, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:00<05:18, 3.62s/it] 83%|████████▎ | 433/520 [27:04<05:14, 3.62s/it] {'loss': 1.2069, 'grad_norm': 0.0007531533397221447, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:04<05:14, 3.62s/it] 83%|████████▎ | 434/520 [27:07<05:10, 3.61s/it] {'loss': 0.9564, 'grad_norm': 0.0007826955508290629, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:07<05:10, 3.61s/it] 84%|████████▎ | 435/520 [27:11<05:06, 3.61s/it] {'loss': 1.2362, 'grad_norm': 0.000826346238468892, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:11<05:06, 3.61s/it] 84%|████████▍ | 436/520 [27:15<05:03, 3.61s/it] {'loss': 1.0453, 'grad_norm': 0.0007894682513075077, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:15<05:03, 3.61s/it] 84%|████████▍ | 437/520 [27:18<05:00, 3.62s/it] {'loss': 1.2586, 'grad_norm': 0.0007839929449792159, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:18<05:00, 3.62s/it] 84%|████████▍ | 438/520 [27:22<04:57, 3.63s/it] {'loss': 1.0815, 'grad_norm': 0.0007770170512691752, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:22<04:57, 3.63s/it] 84%|████████▍ | 439/520 [27:25<04:54, 3.63s/it] {'loss': 1.1125, 'grad_norm': 0.0006243194310958181, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:25<04:54, 3.63s/it] 85%|████████▍ | 440/520 [27:29<04:50, 3.63s/it] {'loss': 1.1143, 'grad_norm': 0.0008015309089058369, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:29<04:50, 3.63s/it] 85%|████████▍ | 441/520 [27:33<04:47, 3.64s/it] {'loss': 1.126, 'grad_norm': 0.0007720101030276253, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:33<04:47, 3.64s/it] 85%|████████▌ | 442/520 [27:36<04:43, 3.64s/it] {'loss': 1.1787, 'grad_norm': 0.0008451228978910961, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:36<04:43, 3.64s/it] 85%|████████▌ | 443/520 [27:40<04:39, 3.64s/it] {'loss': 1.191, 'grad_norm': 0.0007461153510989918, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:40<04:39, 3.64s/it] 85%|████████▌ | 444/520 [27:44<04:37, 3.65s/it] {'loss': 1.1564, 'grad_norm': 0.0006872790649890881, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:44<04:37, 3.65s/it] 86%|████████▌ | 445/520 [27:47<04:32, 3.64s/it] {'loss': 1.0808, 'grad_norm': 0.000733614388282848, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:47<04:32, 3.64s/it] 86%|████████▌ | 446/520 [27:51<04:29, 3.64s/it] {'loss': 1.202, 'grad_norm': 0.0006805452578786265, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:51<04:29, 3.64s/it] 
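The two "Token indices sequence length is longer than the specified maximum sequence length" messages interleaved with the progress log above come from the tokenizer, not the model: the run sets --model_max_length 2048, and HF tokenizers emit exactly this warning when an encoded conversation exceeds model_max_length and no truncation was requested. A minimal sketch of the behavior, assuming a placeholder long_text string (the training pipeline presumably clips such sequences downstream, so the warning is informational here):

from transformers import AutoTokenizer

# mirror the run's tokenizer settings: slow tokenizer, 2048-token limit
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048, use_fast=False)

ids = tok(long_text)["input_ids"]                    # len > 2048 triggers the warning above
ids = tok(long_text, truncation=True)["input_ids"]   # clipped to 2048, no warning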
86%|████████▌ | 447/520 [27:55<04:25, 3.64s/it] {'loss': 1.1564, 'grad_norm': 0.0007515032524038676, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [27:55<04:25, 3.64s/it] 86%|████████▌ | 448/520 [27:58<04:22, 3.64s/it] {'loss': 1.1541, 'grad_norm': 0.0008118347908882632, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [27:58<04:22, 3.64s/it] 86%|████████▋ | 449/520 [28:02<04:18, 3.64s/it] {'loss': 1.1598, 'grad_norm': 0.0007612260371237144, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:02<04:18, 3.64s/it] 87%|████████▋ | 450/520 [28:05<04:14, 3.63s/it] {'loss': 1.1818, 'grad_norm': 0.0007898339132082143, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:05<04:14, 3.63s/it] 87%|████████▋ | 451/520 [28:09<04:11, 3.64s/it] {'loss': 1.183, 'grad_norm': 0.0007960310401482085, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:09<04:11, 3.64s/it] 87%|████████▋ | 452/520 [28:13<04:07, 3.64s/it] {'loss': 1.2068, 'grad_norm': 0.0007045427235233688, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:13<04:07, 3.64s/it] 87%|████████▋ | 453/520 [28:16<04:03, 3.64s/it] {'loss': 1.1835, 'grad_norm': 0.0007166159430647453, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:16<04:03, 3.64s/it] 87%|████████▋ | 454/520 [28:20<04:00, 3.65s/it] {'loss': 1.0888, 'grad_norm': 0.0007693425781429305, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:20<04:00, 3.65s/it] 88%|████████▊ | 455/520 [28:24<03:57, 3.65s/it] {'loss': 1.2294, 'grad_norm': 0.0007640966626195228, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:24<03:57, 3.65s/it] 88%|████████▊ | 456/520 [28:27<03:53, 3.65s/it] {'loss': 1.1619, 'grad_norm': 0.0007874197207772454, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:27<03:53, 3.65s/it] 88%|████████▊ | 457/520 [28:31<03:50, 3.65s/it] {'loss': 1.0752, 'grad_norm': 0.00065151276205993, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:31<03:50, 3.65s/it] 88%|████████▊ | 458/520 [28:35<03:46, 3.65s/it] {'loss': 1.2827, 'grad_norm': 0.0008337595118390717, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:35<03:46, 3.65s/it] 88%|████████▊ | 459/520 [28:38<03:43, 3.66s/it] {'loss': 1.2144, 'grad_norm': 0.0007522009436680444, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:38<03:43, 3.66s/it] 88%|████████▊ | 460/520 [28:42<03:39, 3.65s/it] {'loss': 1.1052, 'grad_norm': 0.0007500415062527459, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:42<03:39, 3.65s/it] 89%|████████▊ | 461/520 [28:46<03:35, 3.65s/it] {'loss': 1.1584, 'grad_norm': 0.000573405633302142, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:46<03:35, 3.65s/it] 89%|████████▉ | 462/520 [28:49<03:32, 3.66s/it] {'loss': 1.2513, 'grad_norm': 0.0007347883768845491, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [28:49<03:32, 3.66s/it] 89%|████████▉ | 463/520 [28:53<03:27, 3.64s/it] {'loss': 1.0715, 'grad_norm': 0.0007901839956469574, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [28:53<03:27, 3.64s/it] 89%|████████▉ | 464/520 [28:57<03:23, 3.63s/it] {'loss': 1.1942, 
'grad_norm': 0.0007823530130477797, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [28:57<03:23, 3.63s/it] 89%|████████▉ | 465/520 [29:00<03:19, 3.63s/it] {'loss': 1.2971, 'grad_norm': 0.0007980051199809583, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:00<03:19, 3.63s/it] 90%|████████▉ | 466/520 [29:04<03:16, 3.64s/it] {'loss': 1.1909, 'grad_norm': 0.0006950453583952655, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:04<03:16, 3.64s/it] 90%|████████▉ | 467/520 [29:08<03:14, 3.67s/it] {'loss': 1.1395, 'grad_norm': 0.0007010873339774308, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:08<03:14, 3.67s/it] 90%|█████████ | 468/520 [29:11<03:10, 3.66s/it] {'loss': 1.1605, 'grad_norm': 0.000876678524918876, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:11<03:10, 3.66s/it] 90%|█████████ | 469/520 [29:15<03:06, 3.65s/it] {'loss': 1.2279, 'grad_norm': 0.0008302140304189045, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:15<03:06, 3.65s/it] 90%|█████████ | 470/520 [29:19<03:02, 3.65s/it] {'loss': 1.1028, 'grad_norm': 0.0007082993081240793, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:19<03:02, 3.65s/it] 91%|█████████ | 471/520 [29:22<02:58, 3.64s/it] {'loss': 1.1267, 'grad_norm': 0.000820738503497942, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:22<02:58, 3.64s/it] 91%|█████████ | 472/520 [29:26<02:56, 3.68s/it] {'loss': 1.0982, 'grad_norm': 0.000730779738881609, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:26<02:56, 3.68s/it] 91%|█████████ | 473/520 [29:30<02:53, 3.69s/it] {'loss': 1.166, 'grad_norm': 0.0007825182923685632, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:30<02:53, 3.69s/it] 91%|█████████ | 474/520 [29:33<02:49, 3.68s/it] {'loss': 1.1751, 'grad_norm': 0.0007087640087055456, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:33<02:49, 3.68s/it] 91%|█████████▏| 475/520 [29:37<02:45, 3.67s/it] {'loss': 1.0914, 'grad_norm': 0.0006995965139793583, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:37<02:45, 3.67s/it] 92%|█████████▏| 476/520 [29:41<02:43, 3.72s/it] {'loss': 1.1514, 'grad_norm': 0.0008004056109449833, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:41<02:43, 3.72s/it] 92%|█████████▏| 477/520 [29:44<02:39, 3.72s/it] {'loss': 1.1473, 'grad_norm': 0.0008592356201903817, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:44<02:39, 3.72s/it] 92%|█████████▏| 478/520 [29:48<02:37, 3.74s/it] {'loss': 1.0927, 'grad_norm': 0.0007570870109682904, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:48<02:37, 3.74s/it] 92%|█████████▏| 479/520 [29:52<02:32, 3.73s/it] {'loss': 1.145, 'grad_norm': 0.000796607965723838, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [29:52<02:32, 3.73s/it] 92%|█████████▏| 480/520 [29:56<02:28, 3.71s/it] {'loss': 1.1606, 'grad_norm': 0.0007073700841638958, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [29:56<02:28, 3.71s/it] 92%|█████████▎| 481/520 [29:59<02:23, 3.69s/it] {'loss': 1.1509, 'grad_norm': 0.0006774011227219453, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [29:59<02:23, 3.69s/it] 93%|█████████▎| 482/520 [30:03<02:19, 3.68s/it] {'loss': 1.1702, 'grad_norm': 0.0007020721506308517, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:03<02:19, 3.68s/it] 93%|█████████▎| 483/520 [30:07<02:15, 3.66s/it] {'loss': 1.1593, 'grad_norm': 0.0007466163332912037, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:07<02:15, 3.66s/it] 93%|█████████▎| 484/520 [30:10<02:11, 3.66s/it] {'loss': 1.1666, 'grad_norm': 0.0007896722945679942, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:10<02:11, 3.66s/it] 93%|█████████▎| 485/520 [30:14<02:08, 3.66s/it] {'loss': 1.1217, 'grad_norm': 0.0007383213782338606, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:14<02:08, 3.66s/it] 93%|█████████▎| 486/520 [30:18<02:04, 3.67s/it] {'loss': 1.241, 'grad_norm': 0.0007971291915227861, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:18<02:04, 3.67s/it] 94%|█████████▎| 487/520 [30:21<02:01, 3.69s/it] {'loss': 1.1017, 'grad_norm': 0.000747562317140795, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:21<02:01, 3.69s/it] 94%|█████████▍| 488/520 [30:25<01:57, 3.67s/it] {'loss': 1.0451, 'grad_norm': 0.0007794661480808944, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:25<01:57, 3.67s/it] 94%|█████████▍| 489/520 [30:29<01:53, 3.66s/it] {'loss': 1.1736, 'grad_norm': 0.0006484553285008182, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:29<01:53, 3.66s/it] 94%|█████████▍| 490/520 [30:32<01:49, 3.66s/it] {'loss': 1.1637, 'grad_norm': 0.0007834514913110468, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:32<01:49, 3.66s/it] 94%|█████████▍| 491/520 [30:36<01:45, 3.65s/it] {'loss': 1.1306, 'grad_norm': 0.0007962087471548336, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:36<01:45, 3.65s/it] 95%|█████████▍| 492/520 [30:40<01:42, 3.66s/it] {'loss': 1.2411, 'grad_norm': 0.000782850505511218, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:40<01:42, 3.66s/it] 95%|█████████▍| 493/520 [30:43<01:38, 3.65s/it] {'loss': 1.1667, 'grad_norm': 0.000756940665544635, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:43<01:38, 3.65s/it] 95%|█████████▌| 494/520 [30:47<01:35, 3.66s/it] {'loss': 1.175, 'grad_norm': 0.0007067901679305809, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:47<01:35, 3.66s/it] 95%|█████████▌| 495/520 [30:50<01:31, 3.65s/it] {'loss': 1.1512, 'grad_norm': 0.0008591467692936483, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [30:50<01:31, 3.65s/it] 95%|█████████▌| 496/520 [30:54<01:27, 3.65s/it] {'loss': 1.0753, 'grad_norm': 0.0007919010231669738, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [30:54<01:27, 3.65s/it] 96%|█████████▌| 497/520 [30:58<01:24, 3.65s/it] {'loss': 1.1008, 'grad_norm': 0.000647905579262987, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [30:58<01:24, 3.65s/it] 96%|█████████▌| 498/520 [31:01<01:20, 3.65s/it] {'loss': 1.1414, 'grad_norm': 0.0007592907084942983, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:01<01:20, 
3.65s/it] 96%|█████████▌| 499/520 [31:05<01:16, 3.65s/it] {'loss': 1.2366, 'grad_norm': 0.0007105939921813743, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:05<01:16, 3.65s/it] 96%|█████████▌| 500/520 [31:09<01:12, 3.65s/it] {'loss': 1.2644, 'grad_norm': 0.0009255606973759983, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:09<01:12, 3.65s/it] 96%|█████████▋| 501/520 [31:12<01:09, 3.64s/it] {'loss': 1.1428, 'grad_norm': 0.0007882317049030687, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:12<01:09, 3.64s/it] 97%|█████████▋| 502/520 [31:16<01:05, 3.64s/it] {'loss': 1.1776, 'grad_norm': 0.0007358224335368026, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:16<01:05, 3.64s/it] 97%|█████████▋| 503/520 [31:20<01:01, 3.65s/it] {'loss': 1.1337, 'grad_norm': 0.000749314883332642, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:20<01:01, 3.65s/it] 97%|█████████▋| 504/520 [31:23<00:58, 3.65s/it] {'loss': 1.1709, 'grad_norm': 0.0008866083928272948, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:23<00:58, 3.65s/it] 97%|█████████▋| 505/520 [31:27<00:54, 3.64s/it] {'loss': 1.2007, 'grad_norm': 0.0007667751021655594, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:27<00:54, 3.64s/it] 97%|█████████▋| 506/520 [31:31<00:51, 3.65s/it] {'loss': 1.1347, 'grad_norm': 0.000807818469096502, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:31<00:51, 3.65s/it] 98%|█████████▊| 507/520 [31:34<00:47, 3.65s/it] {'loss': 1.2767, 'grad_norm': 0.0006988869971388798, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:34<00:47, 3.65s/it] 98%|█████████▊| 508/520 [31:38<00:43, 3.64s/it] {'loss': 1.2488, 'grad_norm': 0.0007876264579938413, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:38<00:43, 3.64s/it] 98%|█████████▊| 509/520 [31:42<00:40, 3.64s/it] {'loss': 1.2234, 'grad_norm': 0.0007452408587866015, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:42<00:40, 3.64s/it] 98%|█████████▊| 510/520 [31:45<00:36, 3.65s/it] {'loss': 1.1697, 'grad_norm': 0.0007656194186295468, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:45<00:36, 3.65s/it] 98%|█████████▊| 511/520 [31:49<00:32, 3.65s/it] {'loss': 1.136, 'grad_norm': 0.0007504440934000855, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [31:49<00:32, 3.65s/it] 98%|█████████▊| 512/520 [31:52<00:29, 3.64s/it] {'loss': 1.0286, 'grad_norm': 0.0007551251011049409, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [31:52<00:29, 3.64s/it] 99%|█████████▊| 513/520 [31:56<00:25, 3.65s/it] {'loss': 1.223, 'grad_norm': 0.0008730774767695594, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [31:56<00:25, 3.65s/it] 99%|█████████▉| 514/520 [32:00<00:21, 3.65s/it] {'loss': 1.19, 'grad_norm': 0.0007056527173912447, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:00<00:21, 3.65s/it] 99%|█████████▉| 515/520 [32:03<00:18, 3.64s/it] {'loss': 1.2388, 'grad_norm': 0.0009119535842282939, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:03<00:18, 3.64s/it] 99%|█████████▉| 516/520 [32:07<00:14, 
3.63s/it] {'loss': 1.1557, 'grad_norm': 0.0007629415707748374, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:07<00:14, 3.63s/it] 99%|█████████▉| 517/520 [32:11<00:10, 3.61s/it] {'loss': 1.1741, 'grad_norm': 0.0007221010451804406, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:11<00:10, 3.61s/it] 100%|█████████▉| 518/520 [32:14<00:07, 3.59s/it] {'loss': 1.163, 'grad_norm': 0.0008005277492913136, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:14<00:07, 3.59s/it] 100%|█████████▉| 519/520 [32:18<00:03, 3.58s/it] {'loss': 1.1456, 'grad_norm': 0.0007366413546886686, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:18<00:03, 3.58s/it] 100%|██████████| 520/520 [32:22<00:00, 3.83s/it] {'loss': 1.1382, 'grad_norm': 0.0006645904242524036, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:22<00:00, 3.83s/it] {'train_runtime': 1942.5801, 'train_samples_per_second': 34.248, 'train_steps_per_second': 0.268, 'train_loss': 1.2183201442544276, 'epoch': 1.0} + 100%|██████████| 520/520 [32:22<00:00, 3.83s/it] 100%|██████████| 520/520 [32:22<00:00, 3.74s/it] +[2025-10-13 18:04:24,034] [INFO] [launch.py:348:main] Process 928314 exits successfully. +[2025-10-13 18:04:24,035] [INFO] [launch.py:348:main] Process 928311 exits successfully. +[2025-10-13 18:04:24,035] [INFO] [launch.py:348:main] Process 928310 exits successfully. +[2025-10-13 18:04:24,035] [INFO] [launch.py:348:main] Process 928308 exits successfully. +[2025-10-13 18:04:25,037] [INFO] [launch.py:348:main] Process 928313 exits successfully. +[2025-10-13 18:04:25,037] [INFO] [launch.py:348:main] Process 928312 exits successfully. +[2025-10-13 18:04:25,038] [INFO] [launch.py:348:main] Process 928309 exits successfully. +[2025-10-13 18:04:28,041] [INFO] [launch.py:348:main] Process 928307 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.1_2e-1_connector-5.0_1.1_2e-1_ablation_20251013_173027.log +Timestamp: 2025-10-13 18:04:30 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation_20251013_180430.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation_20251013_180430.log new file mode 100644 index 0000000000000000000000000000000000000000..119c610bde735f6e929f426d1c0b289bd2ddfe47 --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation_20251013_180430.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation_20251013_180430.log +Timestamp: 2025-10-13 18:04:30 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
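Two sanity checks on the run summarized above. First, the logged learning rates trace the standard cosine-with-warmup curve (--lr_scheduler_type cosine, --warmup_ratio 0.03, 520 steps); second, the summary throughput is consistent with the effective batch size. A minimal sketch, assuming HF's cosine schedule lambda with ceil(0.03 * 520) = 16 warmup steps:

import math

base_lr, total_steps = 0.2, 520
warmup = math.ceil(0.03 * total_steps)  # 16

def lr_at(step):
    # linear warmup, then cosine decay to zero (as in HF get_cosine_schedule_with_warmup)
    if step < warmup:
        return base_lr * step / warmup
    progress = (step - warmup) / (total_steps - warmup)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(436))  # ~0.0133974596, matching the value logged at step 436/520
print(lr_at(520))  # 0.0, matching the final step

# effective batch = 8 GPUs x 4 per-device x 4 grad-accum = 128 samples/step,
# so 0.268 steps/s * 128 = ~34.3 samples/s, matching train_samples_per_second=34.248;
# and ~0.1 * 665k samples / 128 = ~520 steps, matching the epoch length.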
+ import pynvml # type: ignore[import] +[2025-10-13 18:04:33,241] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 18:04:36,152] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 18:04:36,153] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 1.3 --temperature_mlp_text 1.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 1.3 --temperature_mlp_vision 1.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 1.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
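The --world_info argument in the runner command above is just base64-encoded JSON naming the local ranks; decoding it (a minimal sketch) reproduces the WORLD INFO DICT that launch.py prints below:

import base64
import json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# -> {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}, i.e. 8 local ranks, one per GPU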
+ import pynvml # type: ignore[import] +[2025-10-13 18:04:38,683] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 18:04:39,740] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 18:04:39,740] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 18:04:39,740] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 18:04:39,740] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 18:04:39,740] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 18:04:39,740] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 18:04:39,740] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 18:04:39,742] [INFO] [launch.py:253:main] process 948059 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,744] [INFO] 
[launch.py:253:main] process 948060 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,747] [INFO] [launch.py:253:main] process 948061 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,749] [INFO] [launch.py:253:main] process 948062 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,751] [INFO] [launch.py:253:main] process 948063 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,753] [INFO] [launch.py:253:main] process 948064 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,755] [INFO] [launch.py:253:main] process 948065 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:04:39,757] [INFO] [launch.py:253:main] process 948066 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 18:04:46,501] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,748] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,748] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,806] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,816] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,817] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,821] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,827] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 18:04:46,916] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,158] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,159] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 18:04:47,161] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,213] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,213] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,215] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,228] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:04:47,243] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.3, 'temperature_mlp': 1.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+ "backward_type_connector": "normal",
+ "cache_dir": null,
+ "connector_type": "mlp2x_gelu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "square",
+ "image_token_index": -200,
+ "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "mask_model": [
+ "llm",
+ "connector"
+ ],
+ "mask_type_connector": "soft",
+ "model_type": "tinyllava",
+ "num_queries": 128,
+ "num_resampler_layers": 3,
+ "pad_token": null,
+ "resampler_hidden_size": 768,
+ "sparsity_connector": null,
+ "subnet_type_connector": "global",
+ "temperature_connector": 1.3,
+ "text_config": {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "backward_type": "normal",
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_size": 896,
+ "intermediate_size": 4864,
+ "mask_type": "soft",
+ "masked_layers": "all",
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "subnet_mode": "both",
+ "subnet_type": "None",
+ "temperature_attn": 1.3,
+ "temperature_mlp": 1.3,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "threshold_connector": null,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "tokenizer_padding_side": "right",
+ "tokenizer_use_fast": false,
+ "transformers_version": "4.40.1",
+ "tune_type_connector": "frozen",
+ "tune_type_llm": "frozen",
+ "tune_type_vision_tower": "frozen",
+ "tune_vision_tower_from_layer": -1,
+ "use_cache": false,
+ "vision_config": {
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 384,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_name_or_path": "google/siglip-so400m-patch14-384",
+ "model_name_or_path2": "",
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "patch",
+ "vision_hidden_size": 1152,
+ "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+ "vision_model_name_or_path2": "",
+ "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:948059:948059 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948059:948059 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948059:948059 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948059:948059 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948059:948059 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948059:948059 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948061:948061 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:948061:948061 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948061:948061 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948061:948061 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948061:948061 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948061:948061 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948062:948062 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:948062:948062 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948062:948062 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948062:948062 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948062:948062 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948062:948062 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948066:948066 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:948066:948066 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948066:948066 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948066:948066 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948066:948066 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948066:948066 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948060:948060 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:948060:948060 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948060:948060 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948060:948060 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948060:948060 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948060:948060 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948065:948065 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:948065:948065 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948065:948065 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948065:948065 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948065:948065 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948065:948065 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948063:948063 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:948063:948063 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948063:948063 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948063:948063 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:948063:948063 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:948063:948063 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:948064:948064 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:948064:948064 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:948064:948064 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:948064:948064 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:948064:948064 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:948064:948064 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO ncclCommInitRank comm 0x55751dbebd50 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO ncclCommInitRank comm 0x55f00ed623e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO ncclCommInitRank comm 0x56172220f680 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO ncclCommInitRank comm 0x55c57a1f17f0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO ncclCommInitRank comm 0x55cbf819b580 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO ncclCommInitRank comm 0x55961de4ac60 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO ncclCommInitRank comm 0x55e89ab4cf50 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO ncclCommInitRank comm 0x5567288ea400 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x4160752c26a79a63 - Init START +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO NVLS multicast support is not available on dev 
0 +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO comm 0x5567288ea400 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO comm 0x55cbf819b580 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO comm 0x55e89ab4cf50 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO comm 0x55961de4ac60 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO comm 0x55751dbebd50 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO comm 0x55f00ed623e0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO comm 0x56172220f680 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO comm 0x55c57a1f17f0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 
4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 
5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
[NCCL P2P ring setup, per-channel messages elided: each of the eight local ranks connected channels 10/0 through 23/0 to the next rank in the ring (0 -> 1 -> 2 -> ... -> 7 -> 0) via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO Connected all rings
[reverse-direction P2P setup, per-channel messages elided: each rank then connected channels 00/0 through 23/0 to the previous rank (7 -> 6, 6 -> 5, ..., 1 -> 0) via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
[identical "Connected all trees", threadThresholds, and channel-count lines from ranks 1-7 elided]
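For orientation: the ring and reverse-direction channels above are built lazily by the first collective issued on the 8-GPU process group. Below is a minimal, hypothetical sketch (not code from this repository; the script name is assumed) of a program whose first all_reduce would trigger exactly this kind of NCCL channel setup; it would be launched with torchrun --nproc_per_node=8 demo_allreduce.py.

import torch
import torch.distributed as dist

def main():
    # One process per GPU; NCCL is the backend used throughout this log.
    dist.init_process_group(backend="nccl")
    rank = dist.get_rank()
    # Single-node run, so global rank == local GPU index, as in the log.
    torch.cuda.set_device(rank)
    # The first collective builds the communicator and prints the
    # "Channel XX/0 : r -> r' via P2P/CUMEM/read" lines seen above.
    x = torch.full((1024,), float(rank), device="cuda")
    dist.all_reduce(x, op=dist.ReduceOp.SUM)
    if rank == 0:
        print("all_reduce result:", x[0].item())  # 0+1+...+7 = 28.0
    dist.destroy_process_group()

if __name__ == "__main__":
    main()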
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
[identical TUNER/Plugin load-failure lines from the other seven ranks elided]
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
[identical "Using internal tuner plugin." lines from the other seven ranks elided]
+ywang29-vrdb-test1-worker-0:948065:949659 [6] NCCL INFO ncclCommInitRank comm 0x55f00ed623e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x4160752c26a79a63 - Init COMPLETE
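Each "Init COMPLETE" line reports the communicator handle, the process's rank out of nranks 8, its CUDA and NVML device indices, the PCI bus ID, and a commId; all eight lines here carry the same commId 0x4160752c26a79a63, confirming every rank joined the same communicator. When cross-checking such logs against NCCL release notes, a quick, hypothetical check of the NCCL build PyTorch is using looks like this (a sketch, not part of this pipeline):

import torch
import torch.distributed as dist

# Reports whether the NCCL backend is compiled in, and its version tuple,
# e.g. (2, 21, 5); useful when matching log behaviour to an NCCL release.
print("NCCL available:", dist.is_nccl_available())
print("NCCL version:", torch.cuda.nccl.version())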
+ywang29-vrdb-test1-worker-0:948064:949678 [5] NCCL INFO ncclCommInitRank comm 0x56172220f680 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x4160752c26a79a63 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:948066:949641 [7] NCCL INFO ncclCommInitRank comm 0x55751dbebd50 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x4160752c26a79a63 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:948060:949658 [1] NCCL INFO ncclCommInitRank comm 0x55cbf819b580 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x4160752c26a79a63 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:948059:949638 [0] NCCL INFO ncclCommInitRank comm 0x55e89ab4cf50 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x4160752c26a79a63 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:948061:949639 [2] NCCL INFO ncclCommInitRank comm 0x5567288ea400 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x4160752c26a79a63 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:948062:949640 [3] NCCL INFO ncclCommInitRank comm 0x55961de4ac60 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x4160752c26a79a63 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:948063:949660 [4] NCCL INFO ncclCommInitRank comm 0x55c57a1f17f0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x4160752c26a79a63 - Init COMPLETE
+[2025-10-13 18:05:33,826] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.{i}.mlp.down_proj.scores', 'model.layers.{i}.mlp.gate_proj.scores', 'model.layers.{i}.mlp.up_proj.scores', 'model.layers.{i}.self_attn.k_proj.scores', 'model.layers.{i}.self_attn.o_proj.scores', 'model.layers.{i}.self_attn.q_proj.scores', 'model.layers.{i}.self_attn.v_proj.scores' for every layer i in 0-23; full 168-entry listing elided]
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[the same "newly initialized" warning and TRAIN notice were printed verbatim two more times by other loader processes; those repetitions are elided]
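These warnings are expected here rather than a sign of a corrupt checkpoint: the *.scores tensors are extra learnable per-weight parameters, presumably the mask scores used for weight masking (an assumption based on the parameter names), and the pretrain checkpoint being loaded was saved without them, so from_pretrained reports one freshly initialized tensor for every q/k/v/o and gate/up/down projection in all 24 layers. A minimal, hypothetical sketch of a linear layer that would produce exactly this class of warning (names assumed, not this repository's implementation):

import torch
import torch.nn as nn

class ScoredLinear(nn.Linear):
    """nn.Linear plus a learnable per-weight 'scores' tensor (mask logits)."""

    def __init__(self, in_features, out_features, bias=True):
        super().__init__(in_features, out_features, bias=bias)
        # Extra parameter absent from a vanilla Linear checkpoint: loading
        # such a checkpoint leaves it at its fresh initialization and
        # triggers the "newly initialized" warning above.
        self.scores = nn.Parameter(torch.zeros_like(self.weight))

    def forward(self, x):
        # Soft masking: modulate each weight by a (0, 1) gate from its score.
        mask = torch.sigmoid(self.scores)
        return nn.functional.linear(x, self.weight * mask, self.bias)

Because only the scores are missing, the base weight and bias tensors still load normally; the warning is simply HuggingFace's generic advice, which does not apply when the scores are supplied or trained elsewhere.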
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 18:05:35,586] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
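
The SupermaskLinearSparsity_SoftForward_Normal modules printed below replace every nn.Linear in the language model and connector: each one carries a learnable `scores` tensor alongside the frozen pretrained weight, and since those tensors do not exist in the pretrained checkpoint, `from_pretrained` reports them as newly initialized (the warning above is emitted once per data-parallel rank). A minimal sketch of such a soft-masked linear layer, assuming the usual supermask formulation; the class name and internals below are illustrative, not the repo's actual code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Illustrative stand-in for SupermaskLinearSparsity_SoftForward_Normal."""

    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One learnable score per weight entry -- these are the `.scores`
        # tensors that the loader warning above reports as newly initialized.
        self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
        self.weight.requires_grad = False  # mask-tuning: base weights stay frozen

    def forward(self, x):
        # "Soft forward": a sigmoid of the temperature-scaled scores yields a
        # mask in (0, 1) that modulates the frozen pretrained weight.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

With the score initialization of 5.0 reported later in the log ("Pre-training init ... Mean=5.000000") and the configured temperature of 1.3, the initial mask is sigmoid(5/1.3) ≈ 0.98 everywhere, so training starts from a near-identity masking of the pretrained weights.
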
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init
language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: 
Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000 +Pre-training init connector._connector.0.scores: Mean=5.000005 +Pre-training init connector._connector.2.scores: Mean=4.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 18:05:53,765 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 18:05:53,784 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
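
Each trainable `.scores` tensor in this enumeration (which continues below) has exactly the shape of the weight matrix it masks, so the reported totals can be checked by hand. A quick arithmetic sketch, with dimensions taken from the TinyLlavaConfig printed earlier in the log:

```python
# Per-layer score counts for Qwen2.5-0.5B: hidden=896, intermediate=4864, and
# 2 KV heads * head_dim 64 = 128 output features for k_proj/v_proj.
hidden, intermediate, kv_dim = 896, 4864, 128

per_layer = (
    2 * hidden * hidden          # q_proj + o_proj: 802816 each
    + 2 * hidden * kv_dim        # k_proj + v_proj: 114688 each
    + 3 * hidden * intermediate  # gate/up/down_proj: 4358144 each
)
connector = 1152 * 896 + 896 * 896  # connector scores: 1032192 + 802816
print(24 * per_layer + connector)   # -> 359661568, the reported trainable total
```
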
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters 
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
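Every `scores` count above is one mask score per weight element, so the numbers follow directly from the layer shapes in the config (hidden size 896, intermediate size 4864, 2 KV heads of head dim 64, vision hidden size 1152). A minimal arithmetic check, assuming the score tensors mirror the weight matrices only (bias terms carry no scores):

# Hypothetical sketch: reproduce the `.scores` counts in the listing above
# purely from the config dimensions logged for this run.
hidden, inter, kv, vision = 896, 4864, 128, 1152  # kv = num_key_value_heads (2) * head_dim (64)

per_module = {
    "self_attn.q_proj": hidden * hidden,  # 802816
    "self_attn.k_proj": hidden * kv,      # 114688
    "self_attn.v_proj": hidden * kv,      # 114688
    "self_attn.o_proj": hidden * hidden,  # 802816
    "mlp.gate_proj":    hidden * inter,   # 4358144
    "mlp.up_proj":      hidden * inter,   # 4358144
    "mlp.down_proj":    inter * hidden,   # 4358144
}
assert sum(per_module.values()) == 14909440  # mask scores per decoder layer
assert vision * hidden == 1032192            # connector._connector.0.scores
assert hidden * hidden == 802816             # connector._connector.2.scores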
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11]
-1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO Connected all trees 
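The Trees, Channel, and "Connected all rings/trees" lines above, together with the threadThresholds and ncclCommInitRank lines that follow, are NCCL's standard communicator bring-up output for the 8-rank ring 0 -> 1 -> ... -> 7 -> 0. A minimal sketch of how this chatter is produced; the environment variable and launch command are standard PyTorch/NCCL usage, not taken from this log:

# Hypothetical repro: launch with `torchrun --nproc_per_node=8 nccl_debug.py`
import os
import torch
import torch.distributed as dist

os.environ.setdefault("NCCL_DEBUG", "INFO")  # enables the Trees/Channels/P2P INFO lines

dist.init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))

# The first collective initializes the communicator (ncclCommInitRank ... Init COMPLETE)
# and sets up the ring/tree channels reported above.
t = torch.ones(1, device="cuda")
dist.all_reduce(t)
dist.destroy_process_group()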
+ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:948064:954644 [5] NCCL INFO ncclCommInitRank comm 0x7f978c06b090 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948065:954645 [6] NCCL INFO ncclCommInitRank comm 0x7f669806b000 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948059:954639 [0] NCCL INFO ncclCommInitRank comm 0x7f0ca006b2a0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948061:954646 [2] NCCL INFO ncclCommInitRank comm 0x7f181806a470 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948060:954641 [1] NCCL INFO ncclCommInitRank comm 0x7f15c006a9a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948063:954640 [4] NCCL INFO ncclCommInitRank comm 0x7f571006ac30 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948066:954642 [7] NCCL INFO ncclCommInitRank comm 0x7f478406a390 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xfe0a87c853d01cb4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:948062:954643 [3] NCCL INFO ncclCommInitRank comm 0x7faae006b0e0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xfe0a87c853d01cb4 - Init COMPLETE + 0%| | 1/520 [00:14<2:06:07, 14.58s/it] {'loss': 2.045, 'grad_norm': 0.008963252844112693, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:06:07, 14.58s/it] 0%| | 2/520 [00:18<1:11:37, 8.30s/it] {'loss': 2.0521, 'grad_norm': 0.009714886642272642, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:11:37, 8.30s/it] 1%| | 3/520 [00:22<54:14, 6.29s/it] {'loss': 2.189, 'grad_norm': 0.011169021724587138, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:22<54:14, 6.29s/it] 1%| | 4/520 [00:26<45:56, 5.34s/it] {'loss': 1.6829, 'grad_norm': 0.003555954347361074, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:26<45:56, 5.34s/it] 1%| | 5/520 [00:30<41:23, 4.82s/it] {'loss': 1.7032, 'grad_norm': 0.003540859985806969, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:30<41:23, 4.82s/it] 1%| | 6/520 [00:34<38:47, 4.53s/it] {'loss': 1.3967, 'grad_norm': 0.0013526883691342285, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:34<38:47, 4.53s/it] 1%|▏ | 7/520 [00:37<36:51, 4.31s/it] {'loss': 1.4759, 'grad_norm': 0.0015111493777634347, 'learning_rate': 
0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:38<36:51, 4.31s/it] 2%|▏ | 8/520 [00:42<37:16, 4.37s/it] {'loss': 1.4884, 'grad_norm': 0.0010810084515702626, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:42<37:16, 4.37s/it] 2%|▏ | 9/520 [00:46<35:51, 4.21s/it] {'loss': 1.5488, 'grad_norm': 0.0013651075649354276, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:46<35:51, 4.21s/it] 2%|▏ | 10/520 [00:50<34:51, 4.10s/it] {'loss': 1.4094, 'grad_norm': 0.0015541562379688485, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:50<34:51, 4.10s/it] 2%|▏ | 11/520 [00:54<34:30, 4.07s/it] {'loss': 1.453, 'grad_norm': 0.001129480546612534, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:54<34:30, 4.07s/it] 2%|▏ | 12/520 [00:58<33:55, 4.01s/it] {'loss': 1.3319, 'grad_norm': 0.0009122502860676478, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:58<33:55, 4.01s/it][2025-10-13 18:07:00,835] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:02<35:15, 4.17s/it] {'loss': 1.3782, 'grad_norm': 0.0009614585168919808, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:02<35:15, 4.17s/it] 3%|▎ | 14/520 [01:06<34:25, 4.08s/it] {'loss': 1.435, 'grad_norm': 0.001232224696482955, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:06<34:25, 4.08s/it] 3%|▎ | 15/520 [01:10<33:52, 4.02s/it] {'loss': 1.3671, 'grad_norm': 0.0009499439754225466, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:10<33:52, 4.02s/it] 3%|▎ | 16/520 [01:14<33:25, 3.98s/it] {'loss': 1.3273, 'grad_norm': 0.0008661337231164598, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:14<33:25, 3.98s/it] 3%|▎ | 17/520 [01:18<33:20, 3.98s/it] {'loss': 1.4479, 'grad_norm': 0.0007538088684748947, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:18<33:20, 3.98s/it] 3%|▎ | 18/520 [01:22<33:02, 3.95s/it] {'loss': 1.3082, 'grad_norm': 0.0010352542083124183, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:22<33:02, 3.95s/it] 4%|▎ | 19/520 [01:25<32:47, 3.93s/it] {'loss': 1.3291, 'grad_norm': 0.0009985866945296648, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:25<32:47, 3.93s/it] 4%|▍ | 20/520 [01:29<32:35, 3.91s/it] {'loss': 1.2867, 'grad_norm': 0.0008991332838227083, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:29<32:35, 3.91s/it] 4%|▍ | 21/520 [01:33<32:30, 3.91s/it] {'loss': 1.3207, 'grad_norm': 0.000895394824535955, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:33<32:30, 3.91s/it] 4%|▍ | 22/520 [01:37<32:21, 3.90s/it] {'loss': 1.4295, 'grad_norm': 0.0007577254182679784, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:37<32:21, 3.90s/it] 4%|▍ | 23/520 [01:41<31:48, 3.84s/it] {'loss': 1.3811, 'grad_norm': 0.0008955979921940843, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:41<31:48, 3.84s/it] 5%|▍ | 24/520 [01:44<31:08, 3.77s/it] {'loss': 1.3019, 'grad_norm': 0.0008311116708203572, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 
[01:44<31:08, 3.77s/it] 5%|▍ | 25/520 [01:48<30:41, 3.72s/it] {'loss': 1.3736, 'grad_norm': 0.000967780969266851, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:48<30:41, 3.72s/it] 5%|▌ | 26/520 [01:52<30:28, 3.70s/it] {'loss': 1.3297, 'grad_norm': 0.0007750546444467113, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:52<30:28, 3.70s/it] 5%|▌ | 27/520 [01:55<30:16, 3.69s/it] {'loss': 1.2591, 'grad_norm': 0.00083042364058503, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:55<30:16, 3.69s/it] 5%|▌ | 28/520 [01:59<30:06, 3.67s/it] {'loss': 1.2786, 'grad_norm': 0.0008684710100125774, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:59<30:06, 3.67s/it] 6%|▌ | 29/520 [02:03<29:59, 3.67s/it] {'loss': 1.3034, 'grad_norm': 0.0008781034239531097, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:03<29:59, 3.67s/it] 6%|▌ | 30/520 [02:06<30:06, 3.69s/it] {'loss': 1.3689, 'grad_norm': 0.0006822102917788432, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:06<30:06, 3.69s/it] 6%|▌ | 31/520 [02:10<29:54, 3.67s/it] {'loss': 1.2711, 'grad_norm': 0.0007091367212327024, 'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:10<29:54, 3.67s/it] 6%|▌ | 32/520 [02:14<29:48, 3.66s/it] {'loss': 1.206, 'grad_norm': 0.0007009837472277116, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:14<29:48, 3.66s/it] 6%|▋ | 33/520 [02:17<29:38, 3.65s/it] {'loss': 1.2737, 'grad_norm': 0.0008859342977087534, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:17<29:38, 3.65s/it] 7%|▋ | 34/520 [02:21<29:30, 3.64s/it] {'loss': 1.2637, 'grad_norm': 0.0008567074255783606, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:21<29:30, 3.64s/it] 7%|▋ | 35/520 [02:25<29:23, 3.64s/it] {'loss': 1.2721, 'grad_norm': 0.0009466421214983755, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:25<29:23, 3.64s/it] 7%|▋ | 36/520 [02:28<29:22, 3.64s/it] {'loss': 1.361, 'grad_norm': 0.0007476250738513211, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:28<29:22, 3.64s/it] 7%|▋ | 37/520 [02:32<29:18, 3.64s/it] {'loss': 1.3495, 'grad_norm': 0.0006993771522280268, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:32<29:18, 3.64s/it] 7%|▋ | 38/520 [02:35<29:15, 3.64s/it] {'loss': 1.436, 'grad_norm': 0.0007570708052434221, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:35<29:15, 3.64s/it] 8%|▊ | 39/520 [02:39<29:03, 3.62s/it] {'loss': 1.2965, 'grad_norm': 0.0009265387241577057, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:39<29:03, 3.62s/it] 8%|▊ | 40/520 [02:43<28:53, 3.61s/it] {'loss': 1.3285, 'grad_norm': 0.0007476492363167563, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:43<28:53, 3.61s/it] 8%|▊ | 41/520 [02:46<28:50, 3.61s/it] {'loss': 1.3064, 'grad_norm': 0.0007939282859335304, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:46<28:50, 3.61s/it] 8%|▊ | 42/520 [02:50<28:54, 3.63s/it] {'loss': 1.304, 'grad_norm': 0.0009948522677718258, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:50<28:54, 3.63s/it] 8%|▊ | 43/520 [02:54<29:20, 3.69s/it] {'loss': 1.2408, 'grad_norm': 0.000698191332529895, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:54<29:20, 3.69s/it] 8%|▊ | 44/520 [02:58<29:32, 3.72s/it] {'loss': 
1.3457, 'grad_norm': 0.0008304818064102178, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:58<29:32, 3.72s/it] 9%|▊ | 45/520 [03:01<29:39, 3.75s/it] {'loss': 1.3266, 'grad_norm': 0.0008262076735559276, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [03:01<29:39, 3.75s/it] 9%|▉ | 46/520 [03:05<29:43, 3.76s/it] {'loss': 1.3847, 'grad_norm': 0.0008110462679183848, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:05<29:43, 3.76s/it] 9%|▉ | 47/520 [03:09<29:45, 3.78s/it] {'loss': 1.2999, 'grad_norm': 0.0008399881987118983, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:09<29:45, 3.78s/it] 9%|▉ | 48/520 [03:13<29:44, 3.78s/it] {'loss': 1.2907, 'grad_norm': 0.0009388925763070434, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:13<29:44, 3.78s/it] 9%|▉ | 49/520 [03:17<29:44, 3.79s/it] {'loss': 1.3279, 'grad_norm': 0.0008676482272748293, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:17<29:44, 3.79s/it] 10%|▉ | 50/520 [03:20<29:43, 3.80s/it] {'loss': 1.3205, 'grad_norm': 0.0008073004591107683, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:20<29:43, 3.80s/it] 10%|▉ | 51/520 [03:24<29:42, 3.80s/it] {'loss': 1.2604, 'grad_norm': 0.0009190245587735754, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:24<29:42, 3.80s/it] 10%|█ | 52/520 [03:28<29:39, 3.80s/it] {'loss': 1.3857, 'grad_norm': 0.0009603693716668317, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:28<29:39, 3.80s/it] 10%|█ | 53/520 [03:32<29:34, 3.80s/it] {'loss': 1.3644, 'grad_norm': 0.0008792016726663801, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:32<29:34, 3.80s/it] 10%|█ | 54/520 [03:35<29:06, 3.75s/it] {'loss': 1.2988, 'grad_norm': 0.0008472621308663243, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:35<29:06, 3.75s/it] 11%|█ | 55/520 [03:39<29:03, 3.75s/it] {'loss': 1.2658, 'grad_norm': 0.0009759691940395862, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:39<29:03, 3.75s/it] 11%|█ | 56/520 [03:43<29:17, 3.79s/it] {'loss': 1.3851, 'grad_norm': 0.0008666244072990018, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:43<29:17, 3.79s/it] 11%|█ | 57/520 [03:47<29:25, 3.81s/it] {'loss': 1.2511, 'grad_norm': 0.0010320805543685965, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:47<29:25, 3.81s/it] 11%|█ | 58/520 [03:51<29:25, 3.82s/it] {'loss': 1.408, 'grad_norm': 0.0007401695921178749, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:51<29:25, 3.82s/it] 11%|█▏ | 59/520 [03:55<29:30, 3.84s/it] {'loss': 1.2206, 'grad_norm': 0.0008517517954807113, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:55<29:30, 3.84s/it] 12%|█▏ | 60/520 [03:59<29:29, 3.85s/it] {'loss': 1.3122, 'grad_norm': 0.0007964981680499641, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:59<29:29, 3.85s/it] 12%|█▏ | 61/520 [04:02<29:26, 3.85s/it] {'loss': 1.2968, 'grad_norm': 0.0008576027414889593, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:02<29:26, 3.85s/it] 12%|█▏ | 62/520 [04:06<29:33, 3.87s/it] {'loss': 1.3003, 'grad_norm': 0.0009606369236477526, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:06<29:33, 3.87s/it] 12%|█▏ | 63/520 [04:10<29:34, 3.88s/it] {'loss': 1.2884, 'grad_norm': 
0.0008203781912824399, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:10<29:34, 3.88s/it] 12%|█▏ | 64/520 [04:14<29:25, 3.87s/it] {'loss': 1.3172, 'grad_norm': 0.0008693353064648178, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:14<29:25, 3.87s/it] 12%|█▎ | 65/520 [04:18<29:05, 3.84s/it] {'loss': 1.3217, 'grad_norm': 0.0010014918951053943, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:18<29:05, 3.84s/it] 13%|█▎ | 66/520 [04:21<28:30, 3.77s/it] {'loss': 1.2771, 'grad_norm': 0.0008502259301447154, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:21<28:30, 3.77s/it] 13%|█▎ | 67/520 [04:25<28:07, 3.72s/it] {'loss': 1.185, 'grad_norm': 0.000904478384199165, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:25<28:07, 3.72s/it] 13%|█▎ | 68/520 [04:29<27:47, 3.69s/it] {'loss': 1.2527, 'grad_norm': 0.0008488097463413536, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:29<27:47, 3.69s/it] 13%|█▎ | 69/520 [04:32<27:30, 3.66s/it] {'loss': 1.2365, 'grad_norm': 0.001202636129167261, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:32<27:30, 3.66s/it] 13%|█▎ | 70/520 [04:36<27:34, 3.68s/it] {'loss': 1.2603, 'grad_norm': 0.0009341955233498946, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:36<27:34, 3.68s/it] 14%|█▎ | 71/520 [04:40<27:24, 3.66s/it] {'loss': 1.2049, 'grad_norm': 0.000800605136099941, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:40<27:24, 3.66s/it] 14%|█▍ | 72/520 [04:43<27:13, 3.65s/it] {'loss': 1.3541, 'grad_norm': 0.0008927161864494872, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:43<27:13, 3.65s/it] 14%|█▍ | 73/520 [04:47<27:05, 3.64s/it] {'loss': 1.1847, 'grad_norm': 0.0008764440897488233, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:47<27:05, 3.64s/it] 14%|█▍ | 74/520 [04:50<27:01, 3.64s/it] {'loss': 1.2927, 'grad_norm': 0.0009195356018384626, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:50<27:01, 3.64s/it] 14%|█▍ | 75/520 [04:54<26:54, 3.63s/it] {'loss': 1.2113, 'grad_norm': 0.0007985878238846514, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:54<26:54, 3.63s/it] 15%|█▍ | 76/520 [04:58<26:51, 3.63s/it] {'loss': 1.3422, 'grad_norm': 0.0007436085127263038, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:58<26:51, 3.63s/it] 15%|█▍ | 77/520 [05:01<26:45, 3.63s/it] {'loss': 1.1332, 'grad_norm': 0.0009736060070790325, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:01<26:45, 3.63s/it] 15%|█▌ | 78/520 [05:05<26:43, 3.63s/it] {'loss': 1.2449, 'grad_norm': 0.0009152794651061804, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:05<26:43, 3.63s/it] 15%|█▌ | 79/520 [05:09<26:39, 3.63s/it] {'loss': 1.2341, 'grad_norm': 0.0009170887172572663, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:09<26:39, 3.63s/it] 15%|█▌ | 80/520 [05:12<26:33, 3.62s/it] {'loss': 1.3245, 'grad_norm': 0.0008837379742667648, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:12<26:33, 3.62s/it] 16%|█▌ | 81/520 [05:16<26:27, 3.62s/it] {'loss': 1.3721, 'grad_norm': 0.001200388646889111, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:16<26:27, 3.62s/it] 16%|█▌ | 82/520 [05:19<26:25, 3.62s/it] {'loss': 1.3009, 
'grad_norm': 0.0008875257729548425, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:19<26:25, 3.62s/it] 16%|█▌ | 83/520 [05:23<26:20, 3.62s/it] {'loss': 1.3103, 'grad_norm': 0.0009409999421636405, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:23<26:20, 3.62s/it] 16%|█▌ | 84/520 [05:27<26:21, 3.63s/it] {'loss': 1.324, 'grad_norm': 0.0009474193146527601, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:27<26:21, 3.63s/it] 16%|█▋ | 85/520 [05:30<26:14, 3.62s/it] {'loss': 1.3691, 'grad_norm': 0.0009340685806409159, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:30<26:14, 3.62s/it] 17%|█▋ | 86/520 [05:34<26:14, 3.63s/it] {'loss': 1.3583, 'grad_norm': 0.00092079105130133, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:34<26:14, 3.63s/it] 17%|█▋ | 87/520 [05:38<26:15, 3.64s/it] {'loss': 1.2749, 'grad_norm': 0.0008208385104446088, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:38<26:15, 3.64s/it] 17%|█▋ | 88/520 [05:41<26:13, 3.64s/it] {'loss': 1.2183, 'grad_norm': 0.0007057571342126611, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:41<26:13, 3.64s/it] 17%|█▋ | 89/520 [05:45<26:04, 3.63s/it] {'loss': 1.3101, 'grad_norm': 0.0008981615043965905, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:45<26:04, 3.63s/it] 17%|█▋ | 90/520 [05:48<26:03, 3.64s/it] {'loss': 1.246, 'grad_norm': 0.0008830087896964513, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:48<26:03, 3.64s/it] 18%|█▊ | 91/520 [05:52<25:53, 3.62s/it] {'loss': 1.3166, 'grad_norm': 0.0008299777340792309, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:52<25:53, 3.62s/it] 18%|█▊ | 92/520 [05:56<25:46, 3.61s/it] {'loss': 1.263, 'grad_norm': 0.0009409811911504178, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:56<25:46, 3.61s/it] 18%|█▊ | 93/520 [05:59<25:46, 3.62s/it] {'loss': 1.2663, 'grad_norm': 0.0009679556461365521, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:59<25:46, 3.62s/it] 18%|█▊ | 94/520 [06:03<25:38, 3.61s/it] {'loss': 1.3446, 'grad_norm': 0.0008596880641002105, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:03<25:38, 3.61s/it] 18%|█▊ | 95/520 [06:06<25:34, 3.61s/it] {'loss': 1.2496, 'grad_norm': 0.0010669514659318936, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:06<25:34, 3.61s/it] 18%|█▊ | 96/520 [06:10<25:28, 3.61s/it] {'loss': 1.2644, 'grad_norm': 0.0007704385377439495, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:10<25:28, 3.61s/it] 19%|█▊ | 97/520 [06:14<25:27, 3.61s/it] {'loss': 1.2327, 'grad_norm': 0.0010584896381733648, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:14<25:27, 3.61s/it] 19%|█▉ | 98/520 [06:17<25:30, 3.63s/it] {'loss': 1.2375, 'grad_norm': 0.0007890641017497085, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:17<25:30, 3.63s/it] 19%|█▉ | 99/520 [06:21<25:52, 3.69s/it] {'loss': 1.2434, 'grad_norm': 0.0009556370235718186, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:21<25:52, 3.69s/it] 19%|█▉ | 100/520 [06:25<26:15, 3.75s/it] {'loss': 1.2168, 'grad_norm': 0.0007687842298901884, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:25<26:15, 3.75s/it] 19%|█▉ | 101/520 [06:29<26:06, 3.74s/it] 
{'loss': 1.2553, 'grad_norm': 0.000893043727603848, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:29<26:06, 3.74s/it] 20%|█▉ | 102/520 [06:32<25:45, 3.70s/it] {'loss': 1.2601, 'grad_norm': 0.0008985021960291864, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:32<25:45, 3.70s/it] 20%|█▉ | 103/520 [06:36<25:27, 3.66s/it] {'loss': 1.1876, 'grad_norm': 0.0008224390520684312, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:36<25:27, 3.66s/it] 20%|██ | 104/520 [06:40<25:15, 3.64s/it] {'loss': 1.264, 'grad_norm': 0.0008929837142248051, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:40<25:15, 3.64s/it] 20%|██ | 105/520 [06:43<25:11, 3.64s/it] {'loss': 1.2547, 'grad_norm': 0.0008470147790923336, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:43<25:11, 3.64s/it] 20%|██ | 106/520 [06:47<25:08, 3.64s/it] {'loss': 1.2517, 'grad_norm': 0.000806605981433839, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:47<25:08, 3.64s/it] 21%|██ | 107/520 [06:51<25:04, 3.64s/it] {'loss': 1.227, 'grad_norm': 0.0008738807953355856, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:51<25:04, 3.64s/it] 21%|██ | 108/520 [06:54<25:04, 3.65s/it] {'loss': 1.2146, 'grad_norm': 0.0009463061135707595, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:54<25:04, 3.65s/it] 21%|██ | 109/520 [06:58<25:00, 3.65s/it] {'loss': 1.2027, 'grad_norm': 0.0007493426098233202, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:58<25:00, 3.65s/it] 21%|██ | 110/520 [07:01<24:55, 3.65s/it] {'loss': 1.3935, 'grad_norm': 0.0009235091876686315, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:01<24:55, 3.65s/it] 21%|██▏ | 111/520 [07:05<24:45, 3.63s/it] {'loss': 1.3877, 'grad_norm': 0.000956909258663794, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:05<24:45, 3.63s/it] 22%|██▏ | 112/520 [07:09<24:41, 3.63s/it] {'loss': 1.2756, 'grad_norm': 0.0008352471410375827, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:09<24:41, 3.63s/it] 22%|██▏ | 113/520 [07:12<24:35, 3.63s/it] {'loss': 1.1743, 'grad_norm': 0.000838453919479114, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:12<24:35, 3.63s/it] 22%|██▏ | 114/520 [07:16<24:32, 3.63s/it] {'loss': 1.2678, 'grad_norm': 0.0008330453818017206, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:16<24:32, 3.63s/it] 22%|██▏ | 115/520 [07:20<24:29, 3.63s/it] {'loss': 1.3597, 'grad_norm': 0.000889415535777807, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:20<24:29, 3.63s/it] 22%|██▏ | 116/520 [07:23<24:27, 3.63s/it] {'loss': 1.3726, 'grad_norm': 0.0008791640442954034, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:23<24:27, 3.63s/it] 22%|██▎ | 117/520 [07:27<24:22, 3.63s/it] {'loss': 1.334, 'grad_norm': 0.0010361406165796134, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:27<24:22, 3.63s/it] 23%|██▎ | 118/520 [07:30<24:20, 3.63s/it] {'loss': 1.263, 'grad_norm': 0.0008757828028380695, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:30<24:20, 3.63s/it] 23%|██▎ | 119/520 [07:34<24:17, 3.63s/it] {'loss': 1.2196, 'grad_norm': 0.0008757408495817208, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 
[07:34<24:17, 3.63s/it] 23%|██▎ | 120/520 [07:38<24:22, 3.66s/it] {'loss': 1.2277, 'grad_norm': 0.00104290837445256, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:38<24:22, 3.66s/it] 23%|██▎ | 121/520 [07:41<24:14, 3.65s/it] {'loss': 1.2772, 'grad_norm': 0.0011029402011184452, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:41<24:14, 3.65s/it] 23%|██▎ | 122/520 [07:45<24:09, 3.64s/it] {'loss': 1.1925, 'grad_norm': 0.000924952185793782, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:45<24:09, 3.64s/it] 24%|██▎ | 123/520 [07:49<24:02, 3.63s/it] {'loss': 1.2934, 'grad_norm': 0.0010428915071798482, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:49<24:02, 3.63s/it] 24%|██▍ | 124/520 [07:52<23:58, 3.63s/it] {'loss': 1.248, 'grad_norm': 0.0010193199341947325, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:52<23:58, 3.63s/it] 24%|██▍ | 125/520 [07:56<23:56, 3.64s/it] {'loss': 1.2432, 'grad_norm': 0.0008474302341690026, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:56<23:56, 3.64s/it] 24%|██▍ | 126/520 [08:00<25:09, 3.83s/it] {'loss': 1.2267, 'grad_norm': 0.0007685402805711712, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:00<25:09, 3.83s/it] 24%|██▍ | 127/520 [08:04<24:44, 3.78s/it] {'loss': 1.2243, 'grad_norm': 0.0010091636375341025, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:04<24:44, 3.78s/it] 25%|██▍ | 128/520 [08:08<24:21, 3.73s/it] {'loss': 1.265, 'grad_norm': 0.0009298300476763998, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:08<24:21, 3.73s/it] 25%|██▍ | 129/520 [08:11<24:09, 3.71s/it] {'loss': 1.2334, 'grad_norm': 0.0008577710106999447, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:11<24:09, 3.71s/it] 25%|██▌ | 130/520 [08:15<23:53, 3.68s/it] {'loss': 1.2566, 'grad_norm': 0.0008160141640384788, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:15<23:53, 3.68s/it] 25%|██▌ | 131/520 [08:18<23:43, 3.66s/it] {'loss': 1.1905, 'grad_norm': 0.0007771779423648903, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:18<23:43, 3.66s/it] 25%|██▌ | 132/520 [08:22<23:32, 3.64s/it] {'loss': 1.3049, 'grad_norm': 0.0010228813235683826, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:22<23:32, 3.64s/it] 26%|██▌ | 133/520 [08:26<23:28, 3.64s/it] {'loss': 1.2245, 'grad_norm': 0.0009449624176156412, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<23:28, 3.64s/it] 26%|██▌ | 134/520 [08:29<23:21, 3.63s/it] {'loss': 1.2983, 'grad_norm': 0.000877491935910777, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:29<23:21, 3.63s/it] 26%|██▌ | 135/520 [08:33<23:14, 3.62s/it] {'loss': 1.3494, 'grad_norm': 0.0008965910198459117, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:33<23:14, 3.62s/it] 26%|██▌ | 136/520 [08:36<23:09, 3.62s/it] {'loss': 1.2975, 'grad_norm': 0.0008688522705726139, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:36<23:09, 3.62s/it] 26%|██▋ | 137/520 [08:40<23:05, 3.62s/it] {'loss': 1.212, 'grad_norm': 0.001006622606148211, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:40<23:05, 3.62s/it] 27%|██▋ | 138/520 [08:44<23:01, 3.62s/it] {'loss': 1.2289, 'grad_norm': 
0.0008150818360006318, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:44<23:01, 3.62s/it] 27%|██▋ | 139/520 [08:47<23:01, 3.63s/it] {'loss': 1.1044, 'grad_norm': 0.0008534869287113603, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:47<23:01, 3.63s/it] 27%|██▋ | 140/520 [08:51<22:58, 3.63s/it] {'loss': 1.2439, 'grad_norm': 0.0007998898656473003, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:51<22:58, 3.63s/it] 27%|██▋ | 141/520 [08:55<23:06, 3.66s/it] {'loss': 1.3297, 'grad_norm': 0.0008141528031986511, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:55<23:06, 3.66s/it] 27%|██▋ | 142/520 [08:59<23:26, 3.72s/it] {'loss': 1.2543, 'grad_norm': 0.0008096408170050914, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:59<23:26, 3.72s/it] 28%|██▊ | 143/520 [09:02<23:35, 3.75s/it] {'loss': 1.262, 'grad_norm': 0.0009277909061195558, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:02<23:35, 3.75s/it] 28%|██▊ | 144/520 [09:06<23:41, 3.78s/it] {'loss': 1.2304, 'grad_norm': 0.0009144430412241761, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:06<23:41, 3.78s/it] 28%|██▊ | 145/520 [09:10<23:44, 3.80s/it] {'loss': 1.159, 'grad_norm': 0.0007953820830840432, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:10<23:44, 3.80s/it] 28%|██▊ | 146/520 [09:14<23:45, 3.81s/it] {'loss': 1.3044, 'grad_norm': 0.0008404398095169322, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:14<23:45, 3.81s/it] 28%|██▊ | 147/520 [09:18<23:46, 3.82s/it] {'loss': 1.2065, 'grad_norm': 0.0008930190036683225, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:18<23:46, 3.82s/it] 28%|██▊ | 148/520 [09:22<23:43, 3.83s/it] {'loss': 1.2301, 'grad_norm': 0.0008377814414135254, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:22<23:43, 3.83s/it] 29%|██▊ | 149/520 [09:25<23:43, 3.84s/it] {'loss': 1.1693, 'grad_norm': 0.0008291008097393082, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:25<23:43, 3.84s/it] 29%|██▉ | 150/520 [09:29<23:39, 3.84s/it] {'loss': 1.39, 'grad_norm': 0.0008471201244078545, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:29<23:39, 3.84s/it] 29%|██▉ | 151/520 [09:33<23:35, 3.84s/it] {'loss': 1.2175, 'grad_norm': 0.0008677898993370628, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:33<23:35, 3.84s/it] 29%|██▉ | 152/520 [09:37<23:34, 3.84s/it] {'loss': 1.1893, 'grad_norm': 0.0008939948144716188, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:37<23:34, 3.84s/it] 29%|██▉ | 153/520 [09:41<23:31, 3.85s/it] {'loss': 1.2283, 'grad_norm': 0.0008643528092133646, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:41<23:31, 3.85s/it] 30%|██▉ | 154/520 [09:45<23:29, 3.85s/it] {'loss': 1.3043, 'grad_norm': 0.0008365961332895786, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:45<23:29, 3.85s/it] 30%|██▉ | 155/520 [09:49<23:25, 3.85s/it] {'loss': 1.2209, 'grad_norm': 0.0008699726668786786, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:49<23:25, 3.85s/it] 30%|███ | 156/520 [09:52<23:18, 3.84s/it] {'loss': 1.2439, 'grad_norm': 0.001155013387536652, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:52<23:18, 
3.84s/it] 30%|███ | 157/520 [09:56<23:17, 3.85s/it] {'loss': 1.2936, 'grad_norm': 0.0007932322282117376, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:56<23:17, 3.85s/it] 30%|███ | 158/520 [10:00<23:13, 3.85s/it] {'loss': 1.2242, 'grad_norm': 0.0008701399646017258, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:00<23:13, 3.85s/it] 31%|███ | 159/520 [10:04<23:09, 3.85s/it] {'loss': 1.2698, 'grad_norm': 0.0008345717585043191, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:04<23:09, 3.85s/it] 31%|███ | 160/520 [10:08<23:06, 3.85s/it] {'loss': 1.2763, 'grad_norm': 0.0008974239498888698, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:08<23:06, 3.85s/it] 31%|███ | 161/520 [10:12<23:01, 3.85s/it] {'loss': 1.2526, 'grad_norm': 0.0008707262530884913, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:12<23:01, 3.85s/it] 31%|███ | 162/520 [10:15<22:58, 3.85s/it] {'loss': 1.2384, 'grad_norm': 0.0007989698222715999, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:16<22:58, 3.85s/it] 31%|███▏ | 163/520 [10:19<22:57, 3.86s/it] {'loss': 1.1578, 'grad_norm': 0.0009782902165301833, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:19<22:57, 3.86s/it] 32%|███▏ | 164/520 [10:23<22:53, 3.86s/it] {'loss': 1.1185, 'grad_norm': 0.0007947310534209039, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:23<22:53, 3.86s/it] 32%|███▏ | 165/520 [10:27<22:51, 3.86s/it] {'loss': 1.2686, 'grad_norm': 0.0008076249961951907, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:27<22:51, 3.86s/it] 32%|███▏ | 166/520 [10:31<22:33, 3.82s/it] {'loss': 1.2334, 'grad_norm': 0.0009057233166390778, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:31<22:33, 3.82s/it] 32%|███▏ | 167/520 [10:34<22:09, 3.77s/it] {'loss': 1.2297, 'grad_norm': 0.0008454279562405422, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:34<22:09, 3.77s/it] 32%|███▏ | 168/520 [10:38<21:49, 3.72s/it] {'loss': 1.1743, 'grad_norm': 0.0008240866121795479, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:38<21:49, 3.72s/it] 32%|███▎ | 169/520 [10:42<21:33, 3.68s/it] {'loss': 1.2508, 'grad_norm': 0.0008488516103447448, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:42<21:33, 3.68s/it] 33%|███▎ | 170/520 [10:45<21:25, 3.67s/it] {'loss': 1.1918, 'grad_norm': 0.0007257937665724454, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:45<21:25, 3.67s/it] 33%|███▎ | 171/520 [10:49<21:20, 3.67s/it] {'loss': 1.1836, 'grad_norm': 0.0008833720160810536, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:49<21:20, 3.67s/it] 33%|███▎ | 172/520 [10:53<21:11, 3.65s/it] {'loss': 1.2551, 'grad_norm': 0.0007996214238450527, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:53<21:11, 3.65s/it] 33%|███▎ | 173/520 [10:56<21:05, 3.65s/it] {'loss': 1.195, 'grad_norm': 0.0008078424153152116, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:56<21:05, 3.65s/it] 33%|███▎ | 174/520 [11:00<21:04, 3.65s/it] {'loss': 1.2432, 'grad_norm': 0.0008577279816458995, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:00<21:04, 3.65s/it] 34%|███▎ | 175/520 [11:04<21:02, 3.66s/it] {'loss': 1.1673, 'grad_norm': 
0.0007941352662689783, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:04<21:02, 3.66s/it] 34%|███▍ | 176/520 [11:07<21:01, 3.67s/it] {'loss': 1.2672, 'grad_norm': 0.0008443455790396395, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:07<21:01, 3.67s/it] 34%|███▍ | 177/520 [11:11<20:59, 3.67s/it] {'loss': 1.1467, 'grad_norm': 0.0008404699741509097, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:11<20:59, 3.67s/it] 34%|███▍ | 178/520 [11:15<20:53, 3.67s/it] {'loss': 1.231, 'grad_norm': 0.0009234667379070028, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:15<20:53, 3.67s/it] 34%|███▍ | 179/520 [11:18<20:53, 3.67s/it] {'loss': 1.3027, 'grad_norm': 0.0008189542744622301, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:18<20:53, 3.67s/it] 35%|███▍ | 180/520 [11:22<20:46, 3.67s/it] {'loss': 1.2256, 'grad_norm': 0.0008542409527724874, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:22<20:46, 3.67s/it] 35%|███▍ | 181/520 [11:26<20:42, 3.66s/it] {'loss': 1.2011, 'grad_norm': 0.000753779631222641, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:26<20:42, 3.66s/it] 35%|███▌ | 182/520 [11:29<20:39, 3.67s/it] {'loss': 1.2121, 'grad_norm': 0.0008865425462519556, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:29<20:39, 3.67s/it] 35%|███▌ | 183/520 [11:33<20:35, 3.67s/it] {'loss': 1.2353, 'grad_norm': 0.0008345562281357481, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:33<20:35, 3.67s/it] 35%|███▌ | 184/520 [11:37<20:32, 3.67s/it] {'loss': 1.1738, 'grad_norm': 0.0008672055386509615, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:37<20:32, 3.67s/it] 36%|███▌ | 185/520 [11:40<20:45, 3.72s/it] {'loss': 1.3045, 'grad_norm': 0.0008592014431192509, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:40<20:45, 3.72s/it] 36%|███▌ | 186/520 [11:44<20:52, 3.75s/it] {'loss': 1.1942, 'grad_norm': 0.0008694471161038666, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:44<20:52, 3.75s/it] 36%|███▌ | 187/520 [11:48<20:57, 3.78s/it] {'loss': 1.1925, 'grad_norm': 0.0009451357069073482, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:48<20:57, 3.78s/it] 36%|███▌ | 188/520 [11:52<20:43, 3.75s/it] {'loss': 1.2801, 'grad_norm': 0.0008767704819563486, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:52<20:43, 3.75s/it] 36%|███▋ | 189/520 [11:55<20:32, 3.72s/it] {'loss': 1.2812, 'grad_norm': 0.0007875391040760289, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:55<20:32, 3.72s/it] 37%|███▋ | 190/520 [11:59<20:21, 3.70s/it] {'loss': 1.2057, 'grad_norm': 0.0008925468653948328, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:59<20:21, 3.70s/it] 37%|███▋ | 191/520 [12:03<20:08, 3.67s/it] {'loss': 1.1666, 'grad_norm': 0.000794913187917838, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:03<20:08, 3.67s/it] 37%|███▋ | 192/520 [12:06<20:02, 3.67s/it] {'loss': 1.2419, 'grad_norm': 0.0007825404559666692, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:06<20:02, 3.67s/it] 37%|███▋ | 193/520 [12:10<19:53, 3.65s/it] {'loss': 1.195, 'grad_norm': 0.0008989421524055659, 'learning_rate': 0.14506549757999454, 'epoch': 
0.37} + 37%|███▋ | 193/520 [12:10<19:53, 3.65s/it] 37%|███▋ | 194/520 [12:14<19:48, 3.64s/it] {'loss': 1.094, 'grad_norm': 0.0007367349836900654, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:14<19:48, 3.64s/it] 38%|███▊ | 195/520 [12:17<19:43, 3.64s/it] {'loss': 1.2682, 'grad_norm': 0.0008435272567153017, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:17<19:43, 3.64s/it] 38%|███▊ | 196/520 [12:21<19:43, 3.65s/it] {'loss': 1.2395, 'grad_norm': 0.0009325850407275281, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:21<19:43, 3.65s/it] 38%|███▊ | 197/520 [12:25<19:37, 3.64s/it] {'loss': 1.1874, 'grad_norm': 0.0008540583114668342, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:25<19:37, 3.64s/it] 38%|███▊ | 198/520 [12:28<19:34, 3.65s/it] {'loss': 1.2576, 'grad_norm': 0.0009224937067131134, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:28<19:34, 3.65s/it] 38%|███▊ | 199/520 [12:32<19:30, 3.65s/it] {'loss': 1.1785, 'grad_norm': 0.0008528637926720883, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:32<19:30, 3.65s/it] 38%|███▊ | 200/520 [12:36<19:26, 3.65s/it] {'loss': 1.1489, 'grad_norm': 0.0008951672181242832, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:36<19:26, 3.65s/it] 39%|███▊ | 201/520 [12:39<19:20, 3.64s/it] {'loss': 1.1752, 'grad_norm': 0.0007558707712018084, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:39<19:20, 3.64s/it] 39%|███▉ | 202/520 [12:43<19:13, 3.63s/it] {'loss': 1.1835, 'grad_norm': 0.0008688285400186696, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:43<19:13, 3.63s/it] 39%|███▉ | 203/520 [12:46<19:06, 3.62s/it] {'loss': 1.2314, 'grad_norm': 0.0008813664102287126, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:46<19:06, 3.62s/it] 39%|███▉ | 204/520 [12:50<19:02, 3.62s/it] {'loss': 1.244, 'grad_norm': 0.0008612251057680676, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:50<19:02, 3.62s/it] 39%|███▉ | 205/520 [12:54<18:59, 3.62s/it] {'loss': 1.1718, 'grad_norm': 0.0007992528412845638, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:54<18:59, 3.62s/it] 40%|███▉ | 206/520 [12:57<18:57, 3.62s/it] {'loss': 1.2758, 'grad_norm': 0.0008269080169807673, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:57<18:57, 3.62s/it] 40%|███▉ | 207/520 [13:01<18:53, 3.62s/it] {'loss': 1.1432, 'grad_norm': 0.0007561382185977707, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:01<18:53, 3.62s/it] 40%|████ | 208/520 [13:04<18:47, 3.62s/it] {'loss': 1.2707, 'grad_norm': 0.0009345987283874486, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:04<18:47, 3.62s/it] 40%|████ | 209/520 [13:08<18:46, 3.62s/it] {'loss': 1.1838, 'grad_norm': 0.0008136577746112401, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:08<18:46, 3.62s/it] 40%|████ | 210/520 [13:12<18:42, 3.62s/it] {'loss': 1.2527, 'grad_norm': 0.0008884277873154427, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:12<18:42, 3.62s/it] 41%|████ | 211/520 [13:15<18:38, 3.62s/it] {'loss': 1.2539, 'grad_norm': 0.0007971162413738119, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:15<18:38, 3.62s/it] 41%|████ | 212/520 
[13:19<18:35, 3.62s/it] {'loss': 1.251, 'grad_norm': 0.0008279394008937745, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:19<18:35, 3.62s/it] 41%|████ | 213/520 [13:23<18:31, 3.62s/it] {'loss': 1.2037, 'grad_norm': 0.0009403194431126934, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:23<18:31, 3.62s/it] 41%|████ | 214/520 [13:26<18:28, 3.62s/it] {'loss': 1.1917, 'grad_norm': 0.00086698009412499, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:26<18:28, 3.62s/it] 41%|████▏ | 215/520 [13:30<18:24, 3.62s/it] {'loss': 1.1074, 'grad_norm': 0.0007854382778733446, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:30<18:24, 3.62s/it] 42%|████▏ | 216/520 [13:33<18:21, 3.62s/it] {'loss': 1.1198, 'grad_norm': 0.0008597744220881788, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:33<18:21, 3.62s/it] 42%|████▏ | 217/520 [13:37<18:26, 3.65s/it] {'loss': 1.2467, 'grad_norm': 0.00091151262883986, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:37<18:26, 3.65s/it] 42%|████▏ | 218/520 [13:41<18:22, 3.65s/it] {'loss': 1.2213, 'grad_norm': 0.0009415061715895281, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:41<18:22, 3.65s/it] 42%|████▏ | 219/520 [13:44<18:15, 3.64s/it] {'loss': 1.2323, 'grad_norm': 0.0007679867765146348, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:44<18:15, 3.64s/it] 42%|████▏ | 220/520 [13:48<18:10, 3.63s/it] {'loss': 1.1547, 'grad_norm': 0.000781942973050939, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:48<18:10, 3.63s/it] 42%|████▎ | 221/520 [13:52<18:06, 3.63s/it] {'loss': 1.228, 'grad_norm': 0.0008359423853371085, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:52<18:06, 3.63s/it] 43%|████▎ | 222/520 [13:55<18:04, 3.64s/it] {'loss': 1.1725, 'grad_norm': 0.0008512268589071925, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:55<18:04, 3.64s/it] 43%|████▎ | 223/520 [13:59<17:58, 3.63s/it] {'loss': 1.1678, 'grad_norm': 0.0008088491703670212, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [13:59<17:58, 3.63s/it] 43%|████▎ | 224/520 [14:03<17:57, 3.64s/it] {'loss': 1.2072, 'grad_norm': 0.0007541030947902914, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:03<17:57, 3.64s/it] 43%|████▎ | 225/520 [14:06<17:54, 3.64s/it] {'loss': 1.1688, 'grad_norm': 0.000839595423882105, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:06<17:54, 3.64s/it] 43%|████▎ | 226/520 [14:10<17:48, 3.63s/it] {'loss': 1.267, 'grad_norm': 0.0008306678525370377, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:10<17:48, 3.63s/it] 44%|████▎ | 227/520 [14:13<17:43, 3.63s/it] {'loss': 1.2553, 'grad_norm': 0.0008077483079910234, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:13<17:43, 3.63s/it] 44%|████▍ | 228/520 [14:17<17:43, 3.64s/it] {'loss': 1.2555, 'grad_norm': 0.0008607771993620531, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:17<17:43, 3.64s/it] 44%|████▍ | 229/520 [14:21<17:37, 3.63s/it] {'loss': 1.2264, 'grad_norm': 0.0007913622617650644, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:21<17:37, 3.63s/it] 44%|████▍ | 230/520 [14:24<17:34, 3.64s/it] {'loss': 1.1215, 
'grad_norm': 0.0008097394493247945, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:24<17:34, 3.64s/it] 44%|████▍ | 231/520 [14:28<17:30, 3.64s/it] {'loss': 1.1854, 'grad_norm': 0.0007955164365305462, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:28<17:30, 3.64s/it] 45%|████▍ | 232/520 [14:32<17:26, 3.63s/it] {'loss': 1.291, 'grad_norm': 0.0008894468503896836, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:32<17:26, 3.63s/it] 45%|████▍ | 233/520 [14:35<17:23, 3.64s/it] {'loss': 1.1796, 'grad_norm': 0.0008671683341629986, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:35<17:23, 3.64s/it] 45%|████▌ | 234/520 [14:39<17:21, 3.64s/it] {'loss': 1.1355, 'grad_norm': 0.0008938682417908167, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:39<17:21, 3.64s/it] 45%|████▌ | 235/520 [14:43<17:16, 3.64s/it] {'loss': 1.1887, 'grad_norm': 0.0008647217717218824, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:43<17:16, 3.64s/it] 45%|████▌ | 236/520 [14:46<17:14, 3.64s/it] {'loss': 1.2561, 'grad_norm': 0.0007926755184812288, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:46<17:14, 3.64s/it] 46%|████▌ | 237/520 [14:50<17:10, 3.64s/it] {'loss': 1.2632, 'grad_norm': 0.0008301484493165319, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:50<17:10, 3.64s/it] 46%|████▌ | 238/520 [14:53<17:05, 3.64s/it] {'loss': 1.1948, 'grad_norm': 0.000873915118587745, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [14:53<17:05, 3.64s/it] 46%|████▌ | 239/520 [14:57<17:01, 3.63s/it] {'loss': 1.2626, 'grad_norm': 0.0008628639820578861, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [14:57<17:01, 3.63s/it] 46%|████▌ | 240/520 [15:01<16:58, 3.64s/it] {'loss': 1.0848, 'grad_norm': 0.000785747418945769, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:01<16:58, 3.64s/it] 46%|████▋ | 241/520 [15:04<16:55, 3.64s/it] {'loss': 1.1677, 'grad_norm': 0.0008190429696406211, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:04<16:55, 3.64s/it] 47%|████▋ | 242/520 [15:08<17:09, 3.70s/it] {'loss': 1.1786, 'grad_norm': 0.0007933415267710432, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:08<17:09, 3.70s/it] 47%|████▋ | 243/520 [15:12<17:22, 3.77s/it] {'loss': 1.1737, 'grad_norm': 0.0008369736341726864, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:12<17:22, 3.77s/it] 47%|████▋ | 244/520 [15:16<17:23, 3.78s/it] {'loss': 1.2761, 'grad_norm': 0.0008399018239144131, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:16<17:23, 3.78s/it] 47%|████▋ | 245/520 [15:20<17:25, 3.80s/it] {'loss': 1.152, 'grad_norm': 0.0008668901920333299, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:20<17:25, 3.80s/it] 47%|████▋ | 246/520 [15:24<17:25, 3.82s/it] {'loss': 1.2796, 'grad_norm': 0.0008391288616689225, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:24<17:25, 3.82s/it] 48%|████▊ | 247/520 [15:28<17:23, 3.82s/it] {'loss': 1.3275, 'grad_norm': 0.0008837429022358381, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:28<17:23, 3.82s/it] 48%|████▊ | 248/520 [15:31<17:19, 3.82s/it] {'loss': 1.1499, 'grad_norm': 
0.0008299400051029903, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:31<17:19, 3.82s/it] 48%|████▊ | 249/520 [15:35<16:58, 3.76s/it] {'loss': 1.2416, 'grad_norm': 0.0008455310348781001, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:35<16:58, 3.76s/it] 48%|████▊ | 250/520 [15:39<16:46, 3.73s/it] {'loss': 1.1815, 'grad_norm': 0.0008804146852896615, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:39<16:46, 3.73s/it] 48%|████▊ | 251/520 [15:42<16:37, 3.71s/it] {'loss': 1.2489, 'grad_norm': 0.0007960735172627402, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:42<16:37, 3.71s/it] 48%|████▊ | 252/520 [15:46<16:30, 3.70s/it] {'loss': 1.1831, 'grad_norm': 0.0007897542623714591, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:46<16:30, 3.70s/it] 49%|████▊ | 253/520 [15:50<16:24, 3.69s/it] {'loss': 1.2402, 'grad_norm': 0.0009296959993947643, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:50<16:24, 3.69s/it] 49%|████▉ | 254/520 [15:53<16:17, 3.67s/it] {'loss': 1.185, 'grad_norm': 0.0008117657085757773, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:53<16:17, 3.67s/it] 49%|████▉ | 255/520 [15:57<16:10, 3.66s/it] {'loss': 1.1846, 'grad_norm': 0.0009157615399641137, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [15:57<16:10, 3.66s/it] 49%|████▉ | 256/520 [16:00<16:03, 3.65s/it] {'loss': 1.2381, 'grad_norm': 0.0008972159583042667, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:00<16:03, 3.65s/it] 49%|████▉ | 257/520 [16:04<15:57, 3.64s/it] {'loss': 1.2177, 'grad_norm': 0.0008758623302161924, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:04<15:57, 3.64s/it] 50%|████▉ | 258/520 [16:08<15:53, 3.64s/it] {'loss': 1.2211, 'grad_norm': 0.0008268095267193986, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:08<15:53, 3.64s/it] 50%|████▉ | 259/520 [16:11<15:49, 3.64s/it] {'loss': 1.3007, 'grad_norm': 0.000937847141996037, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:11<15:49, 3.64s/it] 50%|█████ | 260/520 [16:15<15:46, 3.64s/it] {'loss': 1.2258, 'grad_norm': 0.000718601199073875, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:15<15:46, 3.64s/it] 50%|█████ | 261/520 [16:19<15:43, 3.64s/it] {'loss': 1.1731, 'grad_norm': 0.000849913896537467, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:19<15:43, 3.64s/it] 50%|█████ | 262/520 [16:22<15:37, 3.63s/it] {'loss': 1.1587, 'grad_norm': 0.0008383514950712482, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:22<15:37, 3.63s/it] 51%|█████ | 263/520 [16:26<15:32, 3.63s/it] {'loss': 1.1981, 'grad_norm': 0.0008203093530064953, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:26<15:32, 3.63s/it] 51%|█████ | 264/520 [16:30<15:59, 3.75s/it] {'loss': 1.2492, 'grad_norm': 0.0008164980106742697, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:30<15:59, 3.75s/it] 51%|█████ | 265/520 [16:34<16:25, 3.87s/it] {'loss': 1.1688, 'grad_norm': 0.0009808704623502152, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:34<16:25, 3.87s/it] 51%|█████ | 266/520 [16:38<16:44, 3.96s/it] {'loss': 1.053, 'grad_norm': 0.0007663666165698818, 'learning_rate': 
0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:38<16:44, 3.96s/it] 51%|█████▏ | 267/520 [16:42<16:39, 3.95s/it] {'loss': 1.1667, 'grad_norm': 0.0008079397930063832, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:42<16:39, 3.95s/it] 52%|█████▏ | 268/520 [16:46<16:27, 3.92s/it] {'loss': 1.2926, 'grad_norm': 0.0009765732598532152, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:46<16:27, 3.92s/it] 52%|█████▏ | 269/520 [16:50<16:17, 3.89s/it] {'loss': 1.2713, 'grad_norm': 0.0008694588046867668, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:50<16:17, 3.89s/it] 52%|█████▏ | 270/520 [16:54<16:14, 3.90s/it] {'loss': 1.1327, 'grad_norm': 0.0008183659444197837, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:54<16:14, 3.90s/it] 52%|█████▏ | 271/520 [16:57<15:56, 3.84s/it] {'loss': 1.2506, 'grad_norm': 0.0008479554670462729, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [16:57<15:56, 3.84s/it] 52%|█████▏ | 272/520 [17:01<15:40, 3.79s/it] {'loss': 1.1486, 'grad_norm': 0.0007938303094776202, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:01<15:40, 3.79s/it] 52%|█████▎ | 273/520 [17:05<15:24, 3.74s/it] {'loss': 1.2531, 'grad_norm': 0.000791225841414644, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:05<15:24, 3.74s/it] 53%|█████▎ | 274/520 [17:08<15:14, 3.72s/it] {'loss': 1.2351, 'grad_norm': 0.000891287604132013, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:08<15:14, 3.72s/it] 53%|█████▎ | 275/520 [17:12<15:06, 3.70s/it] {'loss': 1.1797, 'grad_norm': 0.0009279417278149876, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:12<15:06, 3.70s/it] 53%|█████▎ | 276/520 [17:16<15:00, 3.69s/it] {'loss': 1.2403, 'grad_norm': 0.0010081507893760476, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:16<15:00, 3.69s/it] 53%|█████▎ | 277/520 [17:19<14:55, 3.69s/it] {'loss': 1.2638, 'grad_norm': 0.0007698586002155023, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:19<14:55, 3.69s/it] 53%|█████▎ | 278/520 [17:23<14:49, 3.68s/it] {'loss': 1.1333, 'grad_norm': 0.0007689728460777208, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:23<14:49, 3.68s/it] 54%|█████▎ | 279/520 [17:27<14:45, 3.67s/it] {'loss': 1.1369, 'grad_norm': 0.0008498246008167036, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:27<14:45, 3.67s/it] 54%|█████▍ | 280/520 [17:30<14:44, 3.68s/it] {'loss': 1.1703, 'grad_norm': 0.0009569603397368853, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:30<14:44, 3.68s/it] 54%|█████▍ | 281/520 [17:34<14:52, 3.74s/it] {'loss': 1.2721, 'grad_norm': 0.0008726988097314567, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:34<14:52, 3.74s/it] 54%|█████▍ | 282/520 [17:38<14:47, 3.73s/it] {'loss': 1.1455, 'grad_norm': 0.0008111271096278645, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:38<14:47, 3.73s/it] 54%|█████▍ | 283/520 [17:42<14:39, 3.71s/it] {'loss': 1.2837, 'grad_norm': 0.0009179435723149291, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:42<14:39, 3.71s/it] 55%|█████▍ | 284/520 [17:45<14:31, 3.69s/it] {'loss': 1.1462, 'grad_norm': 0.0008826635466662627, 'learning_rate': 
0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:45<14:31, 3.69s/it] 55%|█████▍ | 285/520 [17:49<14:25, 3.68s/it] {'loss': 1.17, 'grad_norm': 0.0008262868907538045, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:49<14:25, 3.68s/it] 55%|█████▌ | 286/520 [17:53<14:20, 3.68s/it] {'loss': 1.0559, 'grad_norm': 0.0008441847919315457, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:53<14:20, 3.68s/it] 55%|█████▌ | 287/520 [17:56<14:17, 3.68s/it] {'loss': 1.2781, 'grad_norm': 0.0008354226781031481, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [17:56<14:17, 3.68s/it] 55%|█████▌ | 288/520 [18:00<14:12, 3.67s/it] {'loss': 1.3021, 'grad_norm': 0.0008106885154499577, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:00<14:12, 3.67s/it] 56%|█████▌ | 289/520 [18:04<14:07, 3.67s/it] {'loss': 1.1832, 'grad_norm': 0.0008164787659237458, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:04<14:07, 3.67s/it] 56%|█████▌ | 290/520 [18:07<14:00, 3.66s/it] {'loss': 1.1102, 'grad_norm': 0.0007783916136025666, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:07<14:00, 3.66s/it] 56%|█████▌ | 291/520 [18:11<13:56, 3.65s/it] {'loss': 1.1536, 'grad_norm': 0.0008092984317348399, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:11<13:56, 3.65s/it] 56%|█████▌ | 292/520 [18:15<13:51, 3.65s/it] {'loss': 1.2029, 'grad_norm': 0.0008212759363324468, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:15<13:51, 3.65s/it] 56%|█████▋ | 293/520 [18:18<13:49, 3.65s/it] {'loss': 1.1576, 'grad_norm': 0.0008931003406634992, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:18<13:49, 3.65s/it] 57%|█████▋ | 294/520 [18:22<13:45, 3.65s/it] {'loss': 1.1756, 'grad_norm': 0.0008884069235047775, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:22<13:45, 3.65s/it] 57%|█████▋ | 295/520 [18:26<13:41, 3.65s/it] {'loss': 1.1793, 'grad_norm': 0.0008110186237736827, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:26<13:41, 3.65s/it] 57%|█████▋ | 296/520 [18:29<13:39, 3.66s/it] {'loss': 1.1264, 'grad_norm': 0.0008852476624773627, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:29<13:39, 3.66s/it] 57%|█████▋ | 297/520 [18:33<13:34, 3.65s/it] {'loss': 1.2583, 'grad_norm': 0.0009326014531250028, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:33<13:34, 3.65s/it] 57%|█████▋ | 298/520 [18:37<13:31, 3.65s/it] {'loss': 1.2169, 'grad_norm': 0.0008128326721838657, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:37<13:31, 3.65s/it] 57%|█████▊ | 299/520 [18:40<13:26, 3.65s/it] {'loss': 1.2218, 'grad_norm': 0.0007803437073994041, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:40<13:26, 3.65s/it] 58%|█████▊ | 300/520 [18:44<13:21, 3.64s/it] {'loss': 1.2662, 'grad_norm': 0.0008370041563968235, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:44<13:21, 3.64s/it] 58%|█████▊ | 301/520 [18:47<13:17, 3.64s/it] {'loss': 1.2488, 'grad_norm': 0.000837974054025644, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:47<13:17, 3.64s/it] 58%|█████▊ | 302/520 [18:51<13:12, 3.63s/it] {'loss': 1.2318, 'grad_norm': 0.0008378735050562897, 
'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:51<13:12, 3.63s/it] 58%|█████▊ | 303/520 [18:55<13:08, 3.64s/it] {'loss': 1.1723, 'grad_norm': 0.0009269192607306704, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [18:55<13:08, 3.64s/it] 58%|█████▊ | 304/520 [18:59<13:38, 3.79s/it] {'loss': 1.1423, 'grad_norm': 0.0008750436476620099, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [18:59<13:38, 3.79s/it] 59%|█████▊ | 305/520 [19:02<13:25, 3.75s/it] {'loss': 1.2771, 'grad_norm': 0.0009900976305929397, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:03<13:25, 3.75s/it] 59%|█████▉ | 306/520 [19:06<13:19, 3.73s/it] {'loss': 1.2225, 'grad_norm': 0.0008585106077229409, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:06<13:19, 3.73s/it] 59%|█████▉ | 307/520 [19:10<13:09, 3.71s/it] {'loss': 1.1637, 'grad_norm': 0.0008300598814193803, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:10<13:09, 3.71s/it] 59%|█████▉ | 308/520 [19:13<12:59, 3.68s/it] {'loss': 1.2741, 'grad_norm': 0.0008239827703222252, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:13<12:59, 3.68s/it] 59%|█████▉ | 309/520 [19:17<12:53, 3.67s/it] {'loss': 1.1656, 'grad_norm': 0.0008182897106137494, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:17<12:53, 3.67s/it] 60%|█████▉ | 310/520 [19:21<12:46, 3.65s/it] {'loss': 1.1461, 'grad_norm': 0.0008427980739957835, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:21<12:46, 3.65s/it] 60%|█████▉ | 311/520 [19:24<12:41, 3.64s/it] {'loss': 1.1239, 'grad_norm': 0.0008292510496631185, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:24<12:41, 3.64s/it] 60%|██████ | 312/520 [19:28<12:37, 3.64s/it] {'loss': 1.1149, 'grad_norm': 0.0008529640973921884, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:28<12:37, 3.64s/it] 60%|██████ | 313/520 [19:32<12:33, 3.64s/it] {'loss': 1.1019, 'grad_norm': 0.0007744830541547965, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:32<12:33, 3.64s/it] 60%|██████ | 314/520 [19:36<12:52, 3.75s/it] {'loss': 1.1366, 'grad_norm': 0.0008076263114549076, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:36<12:52, 3.75s/it] 61%|██████ | 315/520 [19:39<12:40, 3.71s/it] {'loss': 1.1877, 'grad_norm': 0.0009331967195149447, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:39<12:40, 3.71s/it] 61%|██████ | 316/520 [19:43<12:55, 3.80s/it] {'loss': 1.1203, 'grad_norm': 0.0008552963589275534, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:43<12:55, 3.80s/it] 61%|██████ | 317/520 [19:47<12:43, 3.76s/it] {'loss': 1.1276, 'grad_norm': 0.0007455091878429825, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:47<12:43, 3.76s/it] 61%|██████ | 318/520 [19:51<12:31, 3.72s/it] {'loss': 1.237, 'grad_norm': 0.0008963939117266206, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:51<12:31, 3.72s/it] 61%|██████▏ | 319/520 [19:55<12:43, 3.80s/it] {'loss': 1.1203, 'grad_norm': 0.0007579639230116187, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [19:55<12:43, 3.80s/it] 62%|██████▏ | 320/520 [19:58<12:31, 3.76s/it] {'loss': 1.068, 'grad_norm': 
0.000840502180210542, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [19:58<12:31, 3.76s/it] 62%|██████▏ | 321/520 [20:02<12:23, 3.74s/it] {'loss': 1.2603, 'grad_norm': 0.0008220160688228669, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:02<12:23, 3.74s/it] 62%|██████▏ | 322/520 [20:05<12:12, 3.70s/it] {'loss': 1.0914, 'grad_norm': 0.0007905505442108632, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:05<12:12, 3.70s/it] 62%|██████▏ | 323/520 [20:09<12:06, 3.69s/it] {'loss': 1.1586, 'grad_norm': 0.000833557542120805, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:09<12:06, 3.69s/it] 62%|██████▏ | 324/520 [20:13<12:00, 3.67s/it] {'loss': 1.2034, 'grad_norm': 0.0008286301194559908, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:13<12:00, 3.67s/it] 62%|██████▎ | 325/520 [20:16<11:57, 3.68s/it] {'loss': 1.2036, 'grad_norm': 0.0009043233730000016, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:16<11:57, 3.68s/it] 63%|██████▎ | 326/520 [20:20<11:51, 3.67s/it] {'loss': 1.1998, 'grad_norm': 0.0008674781978057866, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:20<11:51, 3.67s/it] 63%|██████▎ | 327/520 [20:24<11:50, 3.68s/it] {'loss': 1.1955, 'grad_norm': 0.0008612751439794073, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:24<11:50, 3.68s/it] 63%|██████▎ | 328/520 [20:28<11:58, 3.74s/it] {'loss': 1.2427, 'grad_norm': 0.0008584785305519298, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:28<11:58, 3.74s/it] 63%|██████▎ | 329/520 [20:32<11:57, 3.76s/it] {'loss': 1.1238, 'grad_norm': 0.0007352143314239708, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:32<11:57, 3.76s/it] 63%|██████▎ | 330/520 [20:35<11:46, 3.72s/it] {'loss': 1.195, 'grad_norm': 0.0007645884550842013, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:35<11:46, 3.72s/it] 64%|██████▎ | 331/520 [20:39<11:38, 3.69s/it] {'loss': 1.1572, 'grad_norm': 0.0008399877671989279, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:39<11:38, 3.69s/it] 64%|██████▍ | 332/520 [20:42<11:32, 3.68s/it] {'loss': 1.2214, 'grad_norm': 0.0007585179938682476, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:42<11:32, 3.68s/it] 64%|██████▍ | 333/520 [20:46<11:24, 3.66s/it] {'loss': 1.293, 'grad_norm': 0.0008956120985214432, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:46<11:24, 3.66s/it] 64%|██████▍ | 334/520 [20:50<11:21, 3.66s/it] {'loss': 1.2058, 'grad_norm': 0.0008759917288766452, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:50<11:21, 3.66s/it] 64%|██████▍ | 335/520 [20:53<11:15, 3.65s/it] {'loss': 1.2028, 'grad_norm': 0.0007910790225206046, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [20:53<11:15, 3.65s/it] 65%|██████▍ | 336/520 [20:57<11:13, 3.66s/it] {'loss': 1.1065, 'grad_norm': 0.0008925316126339081, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [20:57<11:13, 3.66s/it] 65%|██████▍ | 337/520 [21:01<11:08, 3.65s/it] {'loss': 1.0976, 'grad_norm': 0.0008104854703844233, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:01<11:08, 3.65s/it] 65%|██████▌ | 338/520 
[21:04<11:06, 3.66s/it] {'loss': 1.2067, 'grad_norm': 0.0008290116011520827, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:04<11:06, 3.66s/it] 65%|██████▌ | 339/520 [21:08<11:10, 3.71s/it] {'loss': 1.1531, 'grad_norm': 0.0008522746967573424, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:08<11:10, 3.71s/it] 65%|██████▌ | 340/520 [21:12<11:14, 3.75s/it] {'loss': 1.1425, 'grad_norm': 0.0008218331735305781, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:12<11:14, 3.75s/it] 66%|██████▌ | 341/520 [21:16<11:17, 3.78s/it] {'loss': 1.1679, 'grad_norm': 0.0008737252042347222, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:16<11:17, 3.78s/it] 66%|██████▌ | 342/520 [21:20<11:15, 3.80s/it] {'loss': 1.1929, 'grad_norm': 0.0009421901298890021, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:20<11:15, 3.80s/it] 66%|██████▌ | 343/520 [21:24<11:14, 3.81s/it] {'loss': 1.148, 'grad_norm': 0.0007044851998647772, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:24<11:14, 3.81s/it] 66%|██████▌ | 344/520 [21:27<11:13, 3.82s/it] {'loss': 1.1244, 'grad_norm': 0.0007601234495601581, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:27<11:13, 3.82s/it] 66%|██████▋ | 345/520 [21:31<11:08, 3.82s/it] {'loss': 1.2268, 'grad_norm': 0.0008417279606074695, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:31<11:08, 3.82s/it] 67%|██████▋ | 346/520 [21:35<11:03, 3.81s/it] {'loss': 1.1668, 'grad_norm': 0.0008292408074934397, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:35<11:03, 3.81s/it] 67%|██████▋ | 347/520 [21:39<11:01, 3.82s/it] {'loss': 1.1407, 'grad_norm': 0.0007530198966912518, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:39<11:01, 3.82s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). 
Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:43<10:57, 3.82s/it] {'loss': 1.1004, 'grad_norm': 0.0009637640300058055, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:43<10:57, 3.82s/it] 67%|██████▋ | 349/520 [21:46<10:54, 3.83s/it] {'loss': 1.1374, 'grad_norm': 0.0008202675484246231, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:46<10:54, 3.83s/it] 67%|██████▋ | 350/520 [21:50<10:49, 3.82s/it] {'loss': 1.1784, 'grad_norm': 0.00088924372603856, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:50<10:49, 3.82s/it] 68%|██████▊ | 351/520 [21:54<10:45, 3.82s/it] {'loss': 1.0911, 'grad_norm': 0.0007742482500918683, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [21:54<10:45, 3.82s/it] 68%|██████▊ | 352/520 [21:58<10:33, 3.77s/it] {'loss': 1.2073, 'grad_norm': 0.0007883734427059809, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [21:58<10:33, 3.77s/it] 68%|██████▊ | 353/520 [22:01<10:23, 3.73s/it] {'loss': 1.1314, 'grad_norm': 0.0006867290260043233, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:01<10:23, 3.73s/it] 68%|██████▊ | 354/520 [22:05<10:13, 3.69s/it] {'loss': 1.2336, 'grad_norm': 0.0007789401479418959, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:05<10:13, 3.69s/it] 68%|██████▊ | 355/520 [22:09<10:05, 3.67s/it] {'loss': 1.1491, 'grad_norm': 0.0007923757429242234, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:09<10:05, 3.67s/it] 68%|██████▊ | 356/520 [22:12<09:59, 3.65s/it] {'loss': 1.1524, 'grad_norm': 0.0008299259106484075, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:12<09:59, 3.65s/it] 69%|██████▊ | 357/520 [22:16<09:52, 3.64s/it] {'loss': 1.1822, 'grad_norm': 0.0007755536586984816, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:16<09:52, 3.64s/it] 69%|██████▉ | 358/520 [22:20<09:59, 3.70s/it] {'loss': 1.117, 'grad_norm': 0.0008175489266488804, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:20<09:59, 3.70s/it] 69%|██████▉ | 359/520 [22:24<10:02, 3.75s/it] {'loss': 1.1716, 'grad_norm': 0.0008250670140218797, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:24<10:02, 3.75s/it] 69%|██████▉ | 360/520 [22:27<09:58, 3.74s/it] {'loss': 1.1818, 'grad_norm': 0.000856553524706881, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:27<09:58, 3.74s/it] 69%|██████▉ | 361/520 [22:31<09:50, 3.71s/it] {'loss': 1.1922, 'grad_norm': 0.0007514714835983991, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:31<09:50, 3.71s/it] 70%|██████▉ | 362/520 [22:35<09:42, 3.69s/it] {'loss': 1.1678, 'grad_norm': 0.000873103192268352, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:35<09:42, 3.69s/it] 70%|██████▉ | 363/520 [22:38<09:35, 3.66s/it] {'loss': 1.1917, 'grad_norm': 0.0008333503330168814, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:38<09:35, 3.66s/it] 70%|███████ | 364/520 [22:42<09:30, 3.66s/it] {'loss': 1.2125, 'grad_norm': 0.0008303988068838057, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:42<09:30, 3.66s/it] 70%|███████ | 365/520 [22:45<09:24, 3.65s/it] {'loss': 1.2436, 'grad_norm': 
0.0008478226318198555, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:45<09:24, 3.65s/it] 70%|███████ | 366/520 [22:49<09:20, 3.64s/it] {'loss': 1.2049, 'grad_norm': 0.0007746406671588508, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [22:49<09:20, 3.64s/it] 71%|███████ | 367/520 [22:53<09:19, 3.66s/it] {'loss': 1.2061, 'grad_norm': 0.0008366153152921318, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [22:53<09:19, 3.66s/it] 71%|███████ | 368/520 [22:56<09:15, 3.65s/it] {'loss': 1.0605, 'grad_norm': 0.0008191213980094132, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [22:56<09:15, 3.65s/it] 71%|███████ | 369/520 [23:00<09:10, 3.65s/it] {'loss': 1.1681, 'grad_norm': 0.000754656941951217, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:00<09:10, 3.65s/it] 71%|███████ | 370/520 [23:04<09:06, 3.64s/it] {'loss': 1.1224, 'grad_norm': 0.0007931384668212126, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:04<09:06, 3.64s/it] 71%|███████▏ | 371/520 [23:07<09:02, 3.64s/it] {'loss': 1.1202, 'grad_norm': 0.0008589935766637501, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:07<09:02, 3.64s/it] 72%|███████▏ | 372/520 [23:11<08:58, 3.64s/it] {'loss': 1.241, 'grad_norm': 0.0007714863102734403, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:11<08:58, 3.64s/it] 72%|███████▏ | 373/520 [23:15<08:54, 3.64s/it] {'loss': 1.1296, 'grad_norm': 0.0008868387983806919, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:15<08:54, 3.64s/it] 72%|███████▏ | 374/520 [23:18<08:51, 3.64s/it] {'loss': 1.2079, 'grad_norm': 0.0008370344797204427, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:18<08:51, 3.64s/it] 72%|███████▏ | 375/520 [23:22<08:46, 3.63s/it] {'loss': 1.1265, 'grad_norm': 0.000805853883695964, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:22<08:46, 3.63s/it] 72%|███████▏ | 376/520 [23:25<08:43, 3.63s/it] {'loss': 1.2307, 'grad_norm': 0.0007881414271013413, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:25<08:43, 3.63s/it] 72%|███████▎ | 377/520 [23:29<08:39, 3.63s/it] {'loss': 1.1612, 'grad_norm': 0.0010132243029193798, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:29<08:39, 3.63s/it] 73%|███████▎ | 378/520 [23:33<08:34, 3.62s/it] {'loss': 1.2251, 'grad_norm': 0.0007752828202841085, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:33<08:34, 3.62s/it] 73%|███████▎ | 379/520 [23:36<08:29, 3.61s/it] {'loss': 1.1986, 'grad_norm': 0.0007803272163939704, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:36<08:29, 3.61s/it] 73%|███████▎ | 380/520 [23:40<08:25, 3.61s/it] {'loss': 1.218, 'grad_norm': 0.0008162674250872589, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:40<08:25, 3.61s/it] 73%|███████▎ | 381/520 [23:44<08:23, 3.62s/it] {'loss': 1.2042, 'grad_norm': 0.0007815446072059464, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:44<08:23, 3.62s/it] 73%|███████▎ | 382/520 [23:47<08:21, 3.63s/it] {'loss': 1.1832, 'grad_norm': 0.000772265316001045, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:47<08:21, 3.63s/it] 
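
The "Token indices sequence length is longer than the specified maximum sequence length" lines interleaved above come from the tokenizer, not the trainer: they fire whenever an encoded sample exceeds tokenizer.model_max_length (2048 for this run, per --model_max_length). A minimal repro sketch, assuming the same Qwen/Qwen2.5-0.5B tokenizer; the repeated-word prompt is only an illustration:

    from transformers import AutoTokenizer

    # Encoding more tokens than model_max_length without truncation logs the
    # same warning seen in this log (e.g. 2778 > 2048); the training pipeline
    # is expected to truncate to the limit before the forward pass.
    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048)
    ids = tok("word " * 3000)["input_ids"]   # roughly 3000 tokens -> warning
    ids = ids[:2048]                         # manual truncation to the limit
    print(len(ids))
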
74%|███████▎ | 383/520 [23:51<08:16, 3.62s/it] {'loss': 1.0443, 'grad_norm': 0.0009019932071297665, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:51<08:16, 3.62s/it] 74%|███████▍ | 384/520 [23:54<08:13, 3.63s/it] {'loss': 1.2171, 'grad_norm': 0.0007412065207654519, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [23:54<08:13, 3.63s/it] 74%|███████▍ | 385/520 [23:58<08:09, 3.62s/it] {'loss': 1.1817, 'grad_norm': 0.0007619678912711165, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [23:58<08:09, 3.62s/it] 74%|███████▍ | 386/520 [24:02<08:04, 3.62s/it] {'loss': 1.1395, 'grad_norm': 0.0007295733487181557, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:02<08:04, 3.62s/it] 74%|███████▍ | 387/520 [24:05<08:00, 3.62s/it] {'loss': 1.2407, 'grad_norm': 0.0007939988570250165, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:05<08:00, 3.62s/it] 75%|███████▍ | 388/520 [24:09<07:57, 3.62s/it] {'loss': 1.0959, 'grad_norm': 0.0007782632159891567, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:09<07:57, 3.62s/it] 75%|███████▍ | 389/520 [24:12<07:53, 3.62s/it] {'loss': 1.1412, 'grad_norm': 0.0009203985134806587, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:12<07:53, 3.62s/it] 75%|███████▌ | 390/520 [24:16<07:50, 3.62s/it] {'loss': 1.206, 'grad_norm': 0.0008009261226563981, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:16<07:50, 3.62s/it] 75%|███████▌ | 391/520 [24:20<07:47, 3.63s/it] {'loss': 1.2745, 'grad_norm': 0.0008366119029956559, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:20<07:47, 3.63s/it] 75%|███████▌ | 392/520 [24:23<07:46, 3.64s/it] {'loss': 1.0965, 'grad_norm': 0.000786948173732703, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:23<07:46, 3.64s/it] 76%|███████▌ | 393/520 [24:27<07:41, 3.63s/it] {'loss': 1.0931, 'grad_norm': 0.0006965869417670898, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:27<07:41, 3.63s/it] 76%|███████▌ | 394/520 [24:31<07:36, 3.63s/it] {'loss': 1.1665, 'grad_norm': 0.0008658433669108791, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:31<07:36, 3.63s/it] 76%|███████▌ | 395/520 [24:34<07:36, 3.65s/it] {'loss': 1.1288, 'grad_norm': 0.0008641584953623817, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:34<07:36, 3.65s/it] 76%|███████▌ | 396/520 [24:38<07:34, 3.66s/it] {'loss': 1.2097, 'grad_norm': 0.000867991545973481, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:38<07:34, 3.66s/it] 76%|███████▋ | 397/520 [24:42<07:29, 3.65s/it] {'loss': 1.1821, 'grad_norm': 0.0007914748111499261, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:42<07:29, 3.65s/it] 77%|███████▋ | 398/520 [24:45<07:26, 3.66s/it] {'loss': 1.1832, 'grad_norm': 0.000849809828662944, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:45<07:26, 3.66s/it] 77%|███████▋ | 399/520 [24:49<07:22, 3.66s/it] {'loss': 1.1286, 'grad_norm': 0.0007676425869438416, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [24:49<07:22, 3.66s/it] 77%|███████▋ | 400/520 [24:53<07:19, 3.66s/it] {'loss': 1.159, 'grad_norm': 0.0007213085891030215, 
'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [24:53<07:19, 3.66s/it] 77%|███████▋ | 401/520 [24:56<07:14, 3.65s/it] {'loss': 1.0197, 'grad_norm': 0.0008708654362760704, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [24:56<07:14, 3.65s/it] 77%|███████▋ | 402/520 [25:00<07:09, 3.64s/it] {'loss': 1.1476, 'grad_norm': 0.0008271548694802019, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:00<07:09, 3.64s/it] 78%|███████▊ | 403/520 [25:04<07:05, 3.64s/it] {'loss': 1.1679, 'grad_norm': 0.000878266343471464, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:04<07:05, 3.64s/it] 78%|███████▊ | 404/520 [25:07<07:01, 3.63s/it] {'loss': 1.0824, 'grad_norm': 0.0009352939430020612, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:07<07:01, 3.63s/it] 78%|███████▊ | 405/520 [25:11<06:58, 3.64s/it] {'loss': 1.1417, 'grad_norm': 0.0008333159696057516, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:11<06:58, 3.64s/it] 78%|███████▊ | 406/520 [25:14<06:54, 3.64s/it] {'loss': 1.0579, 'grad_norm': 0.0009671341777445012, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:14<06:54, 3.64s/it] 78%|███████▊ | 407/520 [25:18<06:50, 3.63s/it] {'loss': 1.247, 'grad_norm': 0.0008339029334144051, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:18<06:50, 3.63s/it] 78%|███████▊ | 408/520 [25:22<06:45, 3.62s/it] {'loss': 1.161, 'grad_norm': 0.0009188018515116587, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:22<06:45, 3.62s/it] 79%|███████▊ | 409/520 [25:25<06:41, 3.62s/it] {'loss': 1.2771, 'grad_norm': 0.0008876601093232619, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:25<06:41, 3.62s/it] 79%|███████▉ | 410/520 [25:29<06:38, 3.63s/it] {'loss': 1.0182, 'grad_norm': 0.000832321249094543, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:29<06:38, 3.63s/it] 79%|███████▉ | 411/520 [25:33<06:35, 3.62s/it] {'loss': 1.2597, 'grad_norm': 0.00087900888378228, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:33<06:35, 3.62s/it] 79%|███████▉ | 412/520 [25:36<06:33, 3.64s/it] {'loss': 1.1657, 'grad_norm': 0.0008187890365899106, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:36<06:33, 3.64s/it] 79%|███████▉ | 413/520 [25:40<06:30, 3.65s/it] {'loss': 1.1541, 'grad_norm': 0.0007755545881836989, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:40<06:30, 3.65s/it] 80%|███████▉ | 414/520 [25:44<06:26, 3.65s/it] {'loss': 0.9678, 'grad_norm': 0.0006843700015526253, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:44<06:26, 3.65s/it] 80%|███████▉ | 415/520 [25:47<06:22, 3.64s/it] {'loss': 1.1476, 'grad_norm': 0.0007737538292464107, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [25:47<06:22, 3.64s/it] 80%|████████ | 416/520 [25:51<06:18, 3.64s/it] {'loss': 1.059, 'grad_norm': 0.0008832085135757448, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [25:51<06:18, 3.64s/it] 80%|████████ | 417/520 [25:54<06:13, 3.63s/it] {'loss': 1.2203, 'grad_norm': 0.0008365849912858326, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [25:54<06:13, 3.63s/it] 80%|████████ | 
418/520 [25:58<06:10, 3.64s/it] {'loss': 1.2116, 'grad_norm': 0.0007872646130559536, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [25:58<06:10, 3.64s/it] 81%|████████ | 419/520 [26:02<06:07, 3.64s/it] {'loss': 1.204, 'grad_norm': 0.0008884794958332769, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:02<06:07, 3.64s/it] 81%|████████ | 420/520 [26:05<06:03, 3.64s/it] {'loss': 1.0972, 'grad_norm': 0.0008664030719791136, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:05<06:03, 3.64s/it] 81%|████████ | 421/520 [26:09<06:00, 3.64s/it] {'loss': 1.0338, 'grad_norm': 0.0008863681015585159, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:09<06:00, 3.64s/it] 81%|████████ | 422/520 [26:13<05:56, 3.64s/it] {'loss': 1.1517, 'grad_norm': 0.0008627301928934337, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:13<05:56, 3.64s/it] 81%|████████▏ | 423/520 [26:16<05:52, 3.63s/it] {'loss': 1.1251, 'grad_norm': 0.0008898268742543736, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:16<05:52, 3.63s/it] 82%|████████▏ | 424/520 [26:20<05:49, 3.64s/it] {'loss': 1.2403, 'grad_norm': 0.0007737559787108354, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:20<05:49, 3.64s/it] 82%|████████▏ | 425/520 [26:24<05:45, 3.64s/it] {'loss': 1.1451, 'grad_norm': 0.0008197651168111973, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:24<05:45, 3.64s/it] 82%|████████▏ | 426/520 [26:27<05:41, 3.63s/it] {'loss': 1.17, 'grad_norm': 0.001053181148752558, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:27<05:41, 3.63s/it] 82%|████████▏ | 427/520 [26:31<05:37, 3.62s/it] {'loss': 1.0778, 'grad_norm': 0.0007951509472679883, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:31<05:37, 3.62s/it] 82%|████████▏ | 428/520 [26:34<05:32, 3.62s/it] {'loss': 1.0649, 'grad_norm': 0.0008674578785102674, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:34<05:32, 3.62s/it] 82%|████████▎ | 429/520 [26:38<05:29, 3.62s/it] {'loss': 1.157, 'grad_norm': 0.0008396793934313469, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:38<05:29, 3.62s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [26:42<05:26, 3.62s/it] {'loss': 1.1576, 'grad_norm': 0.0007664872848366421, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [26:42<05:26, 3.62s/it] 83%|████████▎ | 431/520 [26:45<05:23, 3.63s/it] {'loss': 1.1281, 'grad_norm': 0.0008099052982262072, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [26:45<05:23, 3.63s/it] 83%|████████▎ | 432/520 [26:49<05:21, 3.65s/it] {'loss': 1.0709, 'grad_norm': 0.0008368408967941295, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [26:49<05:21, 3.65s/it] 83%|████████▎ | 433/520 [26:53<05:16, 3.64s/it] {'loss': 1.2009, 'grad_norm': 0.0008004833603485773, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [26:53<05:16, 3.64s/it] 83%|████████▎ | 434/520 [26:56<05:14, 3.66s/it] {'loss': 0.9483, 'grad_norm': 0.0008199388176044525, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [26:56<05:14, 3.66s/it] 84%|████████▎ | 435/520 [27:00<05:10, 3.65s/it] {'loss': 1.2327, 'grad_norm': 0.0008910886074463477, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:00<05:10, 3.65s/it] 84%|████████▍ | 436/520 [27:04<05:06, 3.65s/it] {'loss': 1.041, 'grad_norm': 0.0008538610772069738, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:04<05:06, 3.65s/it] 84%|████████▍ | 437/520 [27:07<05:02, 3.65s/it] {'loss': 1.2534, 'grad_norm': 0.0008352430112109951, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:07<05:02, 3.65s/it] 84%|████████▍ | 438/520 [27:11<04:58, 3.64s/it] {'loss': 1.0774, 'grad_norm': 0.0008273711591199433, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:11<04:58, 3.64s/it] 84%|████████▍ | 439/520 [27:15<04:55, 3.65s/it] {'loss': 1.1123, 'grad_norm': 0.0006726504201517369, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:15<04:55, 3.65s/it] 85%|████████▍ | 440/520 [27:18<04:51, 3.64s/it] {'loss': 1.1114, 'grad_norm': 0.0008198233571330727, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:18<04:51, 3.64s/it] 85%|████████▍ | 441/520 [27:22<04:48, 3.65s/it] {'loss': 1.1241, 'grad_norm': 0.0007929030212771095, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:22<04:48, 3.65s/it] 85%|████████▌ | 442/520 [27:25<04:44, 3.64s/it] {'loss': 1.176, 'grad_norm': 0.0008941304345610933, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:25<04:44, 3.64s/it] 85%|████████▌ | 443/520 [27:29<04:40, 3.64s/it] {'loss': 1.1887, 'grad_norm': 0.0007993011832268276, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:29<04:40, 3.64s/it] 85%|████████▌ | 444/520 [27:33<04:36, 3.64s/it] {'loss': 1.1501, 'grad_norm': 0.0007367048458720883, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:33<04:36, 3.64s/it] 86%|████████▌ | 445/520 [27:36<04:32, 3.64s/it] {'loss': 1.0796, 'grad_norm': 0.0007881382233712635, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:36<04:32, 3.64s/it] 86%|████████▌ | 446/520 [27:40<04:28, 3.63s/it] {'loss': 1.1996, 'grad_norm': 0.0007320376213486949, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:40<04:28, 3.63s/it] 
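
The logged learning rates trace a cosine decay with linear warmup. A quick check, assuming the standard transformers cosine-with-warmup schedule and the flags shown in the launch command later in this diff (peak lr 2e-1, warmup_ratio 0.03 of 520 steps, i.e. ceil(15.6) = 16 warmup steps), reproduces the logged values:

    import math

    def cosine_lr(step, peak=2e-1, total=520, warmup=16):
        # Linear warmup, then half-cosine decay to zero
        # (transformers get_cosine_schedule_with_warmup, num_cycles=0.5).
        if step < warmup:
            return peak * step / max(1, warmup)
        progress = (step - warmup) / max(1, total - warmup)
        return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(cosine_lr(352))  # 0.05 exactly, matching step 352's logged learning_rate
    print(cosine_lr(500))  # ~0.000776, matching step 500's logged value
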
86%|████████▌ | 447/520 [27:44<04:25, 3.64s/it] {'loss': 1.1539, 'grad_norm': 0.0007932167678025334, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [27:44<04:25, 3.64s/it] 86%|████████▌ | 448/520 [27:47<04:24, 3.67s/it] {'loss': 1.1505, 'grad_norm': 0.0008463256024006851, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [27:47<04:24, 3.67s/it] 86%|████████▋ | 449/520 [27:51<04:20, 3.66s/it] {'loss': 1.1604, 'grad_norm': 0.0008198695536443776, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [27:51<04:20, 3.66s/it] 87%|████████▋ | 450/520 [27:55<04:16, 3.67s/it] {'loss': 1.1778, 'grad_norm': 0.0008312377912707862, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [27:55<04:16, 3.67s/it] 87%|████████▋ | 451/520 [27:58<04:14, 3.68s/it] {'loss': 1.1758, 'grad_norm': 0.0008534950395005635, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [27:58<04:14, 3.68s/it] 87%|████████▋ | 452/520 [28:02<04:09, 3.68s/it] {'loss': 1.2026, 'grad_norm': 0.0007469734063186654, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:02<04:09, 3.68s/it] 87%|████████▋ | 453/520 [28:06<04:05, 3.66s/it] {'loss': 1.1784, 'grad_norm': 0.0007693192988561137, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:06<04:05, 3.66s/it] 87%|████████▋ | 454/520 [28:09<04:04, 3.70s/it] {'loss': 1.0873, 'grad_norm': 0.0008326434106260913, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:09<04:04, 3.70s/it] 88%|████████▊ | 455/520 [28:13<04:03, 3.74s/it] {'loss': 1.227, 'grad_norm': 0.0008145419122954329, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:13<04:03, 3.74s/it] 88%|████████▊ | 456/520 [28:17<04:01, 3.78s/it] {'loss': 1.1566, 'grad_norm': 0.0008325518989272759, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:17<04:01, 3.78s/it] 88%|████████▊ | 457/520 [28:21<03:59, 3.80s/it] {'loss': 1.0767, 'grad_norm': 0.0007035073212527844, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:21<03:59, 3.80s/it] 88%|████████▊ | 458/520 [28:25<03:56, 3.82s/it] {'loss': 1.2783, 'grad_norm': 0.0008983727351439445, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:25<03:56, 3.82s/it] 88%|████████▊ | 459/520 [28:29<03:53, 3.83s/it] {'loss': 1.2085, 'grad_norm': 0.000831654637041464, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:29<03:53, 3.83s/it] 88%|████████▊ | 460/520 [28:33<03:49, 3.83s/it] {'loss': 1.1013, 'grad_norm': 0.0007922581733507217, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:33<03:49, 3.83s/it] 89%|████████▊ | 461/520 [28:36<03:42, 3.77s/it] {'loss': 1.1598, 'grad_norm': 0.0006172095305983792, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:36<03:42, 3.77s/it] 89%|████████▉ | 462/520 [28:40<03:36, 3.73s/it] {'loss': 1.2491, 'grad_norm': 0.0007777351538678702, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [28:40<03:36, 3.73s/it] 89%|████████▉ | 463/520 [28:44<03:31, 3.71s/it] {'loss': 1.0653, 'grad_norm': 0.0008493884547673142, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [28:44<03:31, 3.71s/it] 89%|████████▉ | 464/520 [28:47<03:27, 3.70s/it] {'loss': 1.1905, 
'grad_norm': 0.0008414732012941071, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [28:47<03:27, 3.70s/it] 89%|████████▉ | 465/520 [28:51<03:22, 3.68s/it] {'loss': 1.2925, 'grad_norm': 0.0008487163423863374, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [28:51<03:22, 3.68s/it] 90%|████████▉ | 466/520 [28:54<03:17, 3.67s/it] {'loss': 1.185, 'grad_norm': 0.0007681610372611094, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [28:54<03:17, 3.67s/it] 90%|████████▉ | 467/520 [28:58<03:13, 3.66s/it] {'loss': 1.1383, 'grad_norm': 0.0007429604442949538, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [28:58<03:13, 3.66s/it] 90%|█████████ | 468/520 [29:02<03:10, 3.66s/it] {'loss': 1.1581, 'grad_norm': 0.0009310317509837113, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:02<03:10, 3.66s/it] 90%|█████████ | 469/520 [29:05<03:06, 3.66s/it] {'loss': 1.2248, 'grad_norm': 0.0009002346778314702, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:05<03:06, 3.66s/it] 90%|█████████ | 470/520 [29:09<03:02, 3.65s/it] {'loss': 1.098, 'grad_norm': 0.000760764924873253, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:09<03:02, 3.65s/it] 91%|█████████ | 471/520 [29:13<02:58, 3.64s/it] {'loss': 1.1233, 'grad_norm': 0.0008631140276665848, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:13<02:58, 3.64s/it] 91%|█████████ | 472/520 [29:16<02:55, 3.65s/it] {'loss': 1.0936, 'grad_norm': 0.0008077323830922608, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:16<02:55, 3.65s/it] 91%|█████████ | 473/520 [29:20<02:51, 3.65s/it] {'loss': 1.1582, 'grad_norm': 0.0008794015109065814, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:20<02:51, 3.65s/it] 91%|█████████ | 474/520 [29:24<02:48, 3.66s/it] {'loss': 1.1743, 'grad_norm': 0.0007580769865586297, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:24<02:48, 3.66s/it] 91%|█████████▏| 475/520 [29:28<02:47, 3.72s/it] {'loss': 1.0905, 'grad_norm': 0.0007443241211013828, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:28<02:47, 3.72s/it] 92%|█████████▏| 476/520 [29:31<02:45, 3.75s/it] {'loss': 1.1482, 'grad_norm': 0.0008538841245130548, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:31<02:45, 3.75s/it] 92%|█████████▏| 477/520 [29:35<02:42, 3.79s/it] {'loss': 1.1421, 'grad_norm': 0.0009120821168618981, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:35<02:42, 3.79s/it] 92%|█████████▏| 478/520 [29:39<02:40, 3.82s/it] {'loss': 1.088, 'grad_norm': 0.0008210765677976673, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:39<02:40, 3.82s/it] 92%|█████████▏| 479/520 [29:43<02:37, 3.84s/it] {'loss': 1.1413, 'grad_norm': 0.0008383741561737429, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [29:43<02:37, 3.84s/it] 92%|█████████▏| 480/520 [29:47<02:33, 3.84s/it] {'loss': 1.1591, 'grad_norm': 0.00076464012981822, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [29:47<02:33, 3.84s/it] 92%|█████████▎| 481/520 [29:51<02:30, 3.86s/it] {'loss': 1.1501, 'grad_norm': 0.0007353377434211987, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [29:51<02:30, 3.86s/it] 93%|█████████▎| 482/520 [29:54<02:23, 3.79s/it] {'loss': 1.168, 'grad_norm': 0.0007602696745484652, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [29:54<02:23, 3.79s/it] 93%|█████████▎| 483/520 [29:58<02:18, 3.74s/it] {'loss': 1.1555, 'grad_norm': 0.0008086168853162448, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [29:58<02:18, 3.74s/it] 93%|█████████▎| 484/520 [30:02<02:13, 3.71s/it] {'loss': 1.1624, 'grad_norm': 0.000846731926294143, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:02<02:13, 3.71s/it] 93%|█████████▎| 485/520 [30:05<02:10, 3.74s/it] {'loss': 1.1173, 'grad_norm': 0.0007843955698108272, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:05<02:10, 3.74s/it] 93%|█████████▎| 486/520 [30:09<02:05, 3.71s/it] {'loss': 1.2373, 'grad_norm': 0.0008550097005205708, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:09<02:05, 3.71s/it] 94%|█████████▎| 487/520 [30:13<02:01, 3.70s/it] {'loss': 1.0951, 'grad_norm': 0.0008486615560878042, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:13<02:01, 3.70s/it] 94%|█████████▍| 488/520 [30:16<01:57, 3.68s/it] {'loss': 1.0397, 'grad_norm': 0.0008574759701976329, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:16<01:57, 3.68s/it] 94%|█████████▍| 489/520 [30:20<01:53, 3.66s/it] {'loss': 1.1725, 'grad_norm': 0.0006948537058051714, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:20<01:53, 3.66s/it] 94%|█████████▍| 490/520 [30:24<01:49, 3.64s/it] {'loss': 1.1599, 'grad_norm': 0.0008334920404132068, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:24<01:49, 3.64s/it] 94%|█████████▍| 491/520 [30:27<01:45, 3.64s/it] {'loss': 1.1256, 'grad_norm': 0.0008559302387376812, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:27<01:45, 3.64s/it] 95%|█████████▍| 492/520 [30:31<01:41, 3.64s/it] {'loss': 1.2338, 'grad_norm': 0.0008525911609317417, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:31<01:41, 3.64s/it] 95%|█████████▍| 493/520 [30:35<01:38, 3.64s/it] {'loss': 1.1664, 'grad_norm': 0.0008081135297097763, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:35<01:38, 3.64s/it] 95%|█████████▌| 494/520 [30:38<01:34, 3.64s/it] {'loss': 1.1711, 'grad_norm': 0.0007615900068086018, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:38<01:34, 3.64s/it] 95%|█████████▌| 495/520 [30:42<01:30, 3.63s/it] {'loss': 1.1457, 'grad_norm': 0.0008526249438829468, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [30:42<01:30, 3.63s/it] 95%|█████████▌| 496/520 [30:45<01:27, 3.65s/it] {'loss': 1.0654, 'grad_norm': 0.000830902250970901, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [30:45<01:27, 3.65s/it] 96%|█████████▌| 497/520 [30:49<01:23, 3.64s/it] {'loss': 1.0997, 'grad_norm': 0.0006882899629043641, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [30:49<01:23, 3.64s/it] 96%|█████████▌| 498/520 [30:53<01:19, 3.63s/it] {'loss': 1.1357, 'grad_norm': 0.0008088544743931823, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [30:53<01:19, 
3.63s/it] 96%|█████████▌| 499/520 [30:56<01:16, 3.64s/it] {'loss': 1.2364, 'grad_norm': 0.0008011387428143043, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [30:56<01:16, 3.64s/it] 96%|█████████▌| 500/520 [31:00<01:12, 3.64s/it] {'loss': 1.2566, 'grad_norm': 0.0009996032468675134, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:00<01:12, 3.64s/it] 96%|█████████▋| 501/520 [31:04<01:09, 3.63s/it] {'loss': 1.1442, 'grad_norm': 0.000875388970415023, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:04<01:09, 3.63s/it] 97%|█████████▋| 502/520 [31:07<01:05, 3.62s/it] {'loss': 1.1737, 'grad_norm': 0.0007800657675632539, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:07<01:05, 3.62s/it] 97%|█████████▋| 503/520 [31:11<01:01, 3.63s/it] {'loss': 1.1327, 'grad_norm': 0.0008364977408115998, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:11<01:01, 3.63s/it] 97%|█████████▋| 504/520 [31:15<00:58, 3.68s/it] {'loss': 1.166, 'grad_norm': 0.0009499207186656111, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:15<00:58, 3.68s/it] 97%|█████████▋| 505/520 [31:18<00:55, 3.73s/it] {'loss': 1.1947, 'grad_norm': 0.0008225823552414294, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:18<00:55, 3.73s/it] 97%|█████████▋| 506/520 [31:22<00:52, 3.75s/it] {'loss': 1.1313, 'grad_norm': 0.0008496619349266519, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:22<00:52, 3.75s/it] 98%|█████████▊| 507/520 [31:26<00:49, 3.77s/it] {'loss': 1.2748, 'grad_norm': 0.0007473473415722349, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:26<00:49, 3.77s/it] 98%|█████████▊| 508/520 [31:30<00:45, 3.79s/it] {'loss': 1.2446, 'grad_norm': 0.0008453474510393076, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:30<00:45, 3.79s/it] 98%|█████████▊| 509/520 [31:34<00:41, 3.81s/it] {'loss': 1.2177, 'grad_norm': 0.0007957145585718058, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:34<00:41, 3.81s/it] 98%|█████████▊| 510/520 [31:38<00:38, 3.82s/it] {'loss': 1.1654, 'grad_norm': 0.0008155230928690431, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:38<00:38, 3.82s/it] 98%|█████████▊| 511/520 [31:41<00:34, 3.81s/it] {'loss': 1.1341, 'grad_norm': 0.0007976358289346779, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [31:41<00:34, 3.81s/it] 98%|█████████▊| 512/520 [31:45<00:30, 3.81s/it] {'loss': 1.0228, 'grad_norm': 0.0008156049707896399, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [31:45<00:30, 3.81s/it] 99%|█████████▊| 513/520 [31:49<00:26, 3.81s/it] {'loss': 1.2206, 'grad_norm': 0.0009380600137513282, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [31:49<00:26, 3.81s/it] 99%|█████████▉| 514/520 [31:53<00:22, 3.81s/it] {'loss': 1.1866, 'grad_norm': 0.0007522229616860092, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [31:53<00:22, 3.81s/it] 99%|█████████▉| 515/520 [31:57<00:19, 3.81s/it] {'loss': 1.2341, 'grad_norm': 0.0009733680455813171, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [31:57<00:19, 3.81s/it] 99%|█████████▉| 516/520 [32:00<00:15, 
3.80s/it] {'loss': 1.1478, 'grad_norm': 0.000802839439369973, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:00<00:15, 3.80s/it] 99%|█████████▉| 517/520 [32:04<00:11, 3.79s/it] {'loss': 1.173, 'grad_norm': 0.0007759904695454901, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:04<00:11, 3.79s/it] 100%|█████████▉| 518/520 [32:08<00:07, 3.79s/it] {'loss': 1.1563, 'grad_norm': 0.0008777123769018437, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:08<00:07, 3.79s/it] 100%|█████████▉| 519/520 [32:12<00:03, 3.79s/it] {'loss': 1.1428, 'grad_norm': 0.000788651417815453, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:12<00:03, 3.79s/it] 100%|██████████| 520/520 [32:16<00:00, 4.03s/it] {'loss': 1.1354, 'grad_norm': 0.0007658627964084884, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:16<00:00, 4.03s/it] {'train_runtime': 1936.9103, 'train_samples_per_second': 34.348, 'train_steps_per_second': 0.268, 'train_loss': 1.2190544316401848, 'epoch': 1.0} + 100%|██████████| 520/520 [32:16<00:00, 4.03s/it] 100%|██████████| 520/520 [32:16<00:00, 3.72s/it] +[2025-10-13 18:38:20,914] [INFO] [launch.py:348:main] Process 948064 exits successfully. +[2025-10-13 18:38:21,915] [INFO] [launch.py:348:main] Process 948065 exits successfully. +[2025-10-13 18:38:21,916] [INFO] [launch.py:348:main] Process 948066 exits successfully. +[2025-10-13 18:38:21,916] [INFO] [launch.py:348:main] Process 948060 exits successfully. +[2025-10-13 18:38:21,917] [INFO] [launch.py:348:main] Process 948063 exits successfully. +[2025-10-13 18:38:21,917] [INFO] [launch.py:348:main] Process 948061 exits successfully. +[2025-10-13 18:38:22,918] [INFO] [launch.py:348:main] Process 948062 exits successfully. +[2025-10-13 18:38:25,922] [INFO] [launch.py:348:main] Process 948059 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.3_2e-1_connector-5.0_1.3_2e-1_ablation_20251013_180430.log +Timestamp: 2025-10-13 18:38:28 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation_20251013_183828.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation_20251013_183828.log new file mode 100644 index 0000000000000000000000000000000000000000..aac35ddc459d101a006af54c91ea1b46039e34e3 --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation_20251013_183828.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation_20251013_183828.log +Timestamp: 2025-10-13 18:38:28 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
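
Before the next run's launcher output continues, a back-of-the-envelope check of the completed run's summary ('train_runtime': 1936.9103, 'train_samples_per_second': 34.348, 520 steps), assuming it used the same batch settings as the launch command that follows (per-device batch 4, gradient accumulation 4, 8 ranks):

    # All numeric inputs are quoted from the log above.
    per_device_bs, grad_accum, world_size = 4, 4, 8
    global_batch = per_device_bs * grad_accum * world_size   # 128
    samples_seen = 34.348 * 1936.9103                        # ~66,529 samples
    steps = samples_seen / global_batch                      # ~520 optimizer steps
    print(global_batch, round(samples_seen), round(steps))
    # 0.1 of llava_v1_5_mix665k (--train_data_ratio 0.1) is ~66.5k samples,
    # consistent with the 520/520 steps reported at 100%.
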
+ import pynvml # type: ignore[import] +[2025-10-13 18:38:31,384] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 18:38:34,346] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 18:38:34,348] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 1.5 --temperature_mlp_text 1.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 1.5 --temperature_mlp_vision 1.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 1.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
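
The --world_info argument in the runner cmd above is base64-encoded JSON; decoding it (a small standard-library sketch) recovers the host-to-GPU mapping that launch.py prints next as WORLD INFO DICT:

    import base64
    import json

    # Value copied verbatim from the runner cmd above.
    blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
    print(json.loads(base64.b64decode(blob)))
    # -> {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
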
# import pynvml # type: ignore[import] +[2025-10-13 18:38:36,959] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 18:38:37,968] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 18:38:37,968] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 18:38:37,968] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 18:38:37,968] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 18:38:37,968] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 18:38:37,968] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 18:38:37,968] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 18:38:37,971] [INFO] [launch.py:253:main] process 967817 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.5', '--temperature_mlp_text', '1.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.5', '--temperature_mlp_vision', '1.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:38:37,973] [INFO]
[launch.py:253:main] process 967818 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.5', '--temperature_mlp_text', '1.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.5', '--temperature_mlp_vision', '1.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:38:37,975] [INFO] [launch.py:253:main] process 967819 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.5', '--temperature_mlp_text', '1.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.5', '--temperature_mlp_vision', '1.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 18:38:37,977] [INFO] [launch.py:253:main] process 967820 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.5', '--temperature_mlp_text', '1.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.5', '--temperature_mlp_vision', '1.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 18:38:37,979] [INFO] [launch.py:253:main] process 967821 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', ...]
+[2025-10-13 18:38:37,982] [INFO] [launch.py:253:main] process 967822 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', ...]
+[2025-10-13 18:38:37,984] [INFO] [launch.py:253:main] process 967823 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', ...]
+[2025-10-13 18:38:37,986] [INFO] [launch.py:253:main] process 967824 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', ...]
+(each spawn command above is identical to the first except for --local_rank; the repeated full argument lists are elided)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml  # type: ignore[import]
+(the pynvml FutureWarning above is emitted once by each of the eight spawned ranks; duplicates elided)
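The launcher spawns one tinyllava/train/train.py process per GPU, with the argument lists differing only in --local_rank. As a hedged illustration (not the repository's actual code), a spawned rank typically consumes that flag like this:

```python
# Illustrative sketch only, not the TinyLLaVA source: how a spawned rank
# typically consumes the --local_rank flag appended by the DeepSpeed launcher.
import argparse

import deepspeed
import torch

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
args, _ = parser.parse_known_args()

torch.cuda.set_device(args.local_rank)           # one process per GPU, ranks 0-7 here
deepspeed.init_distributed(dist_backend="nccl")  # produces the comm.py init_distributed lines below
```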
+[2025-10-13 18:38:44,636] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+(the same ds_accelerator line is printed by each of the eight ranks, timestamps 18:38:44,636 through 18:38:45,079; duplicates elided)
+[2025-10-13 18:38:45,035] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 18:38:45,370] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+(the cdb=None line is printed once per rank, timestamps 18:38:45,035 through 18:38:45,481; duplicates elided)
+Apply masks for the following modules: ['llm', 'connector']
+(printed once per rank; duplicates elided)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+(this resume_download FutureWarning repeats once per rank; duplicates elided)
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.5, 'temperature_mlp': 1.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
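The dict above fixes the masking hyperparameters for this run: soft masks at temperature 1.5 on the LLM and connector, with mask scores initialized to init_mean 5.0 per the launch command. Below is a minimal sketch of one common way such a soft, temperature-scaled weight mask can be realized; the class is hypothetical, not the repository's implementation.

```python
# Hypothetical sketch of a "soft" masked linear layer, assuming the
# configuration above (mask_type='soft', temperature=1.5, init_mean=5.0).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=5.0, temperature=1.5):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight; sigmoid(5.0 / 1.5) ~ 0.965,
        # so the mask starts close to fully "on".
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)
```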
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.5,
+    "temperature_mlp": 1.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+(this notice repeats once per rank; duplicates elided)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+(this UserWarning repeats once per rank; duplicates elided)
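Under the usual LLaVA-style reading of connector_type "mlp2x_gelu", the connector is a two-layer GELU MLP projecting SigLIP features (vision_hidden_size 1152) into the LLM embedding space (hidden_size 896). A minimal sketch under that assumption:

```python
# Minimal sketch, assuming the conventional meaning of "mlp2x_gelu":
# project vision features (1152-d) into the LLM hidden space (896-d).
import torch.nn as nn

def build_mlp2x_gelu(vision_hidden_size=1152, hidden_size=896):
    return nn.Sequential(
        nn.Linear(vision_hidden_size, hidden_size),
        nn.GELU(),
        nn.Linear(hidden_size, hidden_size),
    )
```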
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+(this Flash Attention warning, plus further repeats of the "Special tokens" notice and the TypedStorage UserWarning, is emitted once per rank; duplicates elided)
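The Flash Attention warning is benign at load time; it only asks that the weights be moved to GPU before use. A minimal sketch of the loading pattern it suggests, with the model name and settings taken from the config above:

```python
# Sketch of the fix the warning suggests: load on CPU, then move to GPU.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,              # FA2 requires fp16/bf16 weights
    attn_implementation="flash_attention_2",
)
model.to("cuda")  # silences the warning: weights must live on GPU before use
```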
+ywang29-vrdb-test1-worker-0:967817:967817 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:967817:967817 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:967817:967817 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:967817:967817 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:967817:967817 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:967817:967817 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO Using network Socket
+(ranks 1-7 print the same bootstrap, plugin, and socket lines; duplicates elided)
+ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO ncclCommInitRank comm 0x55c2f612e1a0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x98947c8e3644c0f4 - Init START
+(matching Init START lines for ranks 1-7 follow on the same commId; elided)
+(each rank then sets its CPU affinity mask and reports that NVLS multicast support is not available on its device, devs 0-7)
+ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO comm 0x55c2f612e1a0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+(matching comm lines for ranks 1-7 follow: nRanks 8, nNodes 1, localRanks 8, MNNVL 0)
+(rank 0 announces 24 ring channels, Channel 00/24 through 23/24, each with ring order 0 1 2 3 4 5 6 7)
+(each rank prints its 24-channel tree table, a single chain 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 with rank 0 at the root, repeated identically on every channel, and sets P2P Chunksize to 524288)
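At this point the 8-rank, single-node NCCL communicator is fully initialized. Below is a sanity-check sketch (not part of this run) that verifies such a communicator with an all_reduce; the script name is hypothetical, it can be launched with torchrun --nproc_per_node=8, and NCCL_SOCKET_IFNAME=eth matches the log.

```python
# check_nccl.py (hypothetical): verify an 8-rank single-node NCCL setup.
import os

import torch
import torch.distributed as dist

def main():
    dist.init_process_group(backend="nccl")        # env:// rendezvous under torchrun
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    t = torch.ones(1, device="cuda")
    dist.all_reduce(t)                             # sum of ones across all ranks
    assert t.item() == dist.get_world_size()       # 8.0 on this node
    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```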
+(ring wiring then proceeds: on each of the 24 channels, every rank connects to its ring successor, 0[0] -> 1[1] -> 2[2] -> ... -> 7[7] -> 0[0], all via P2P/CUMEM/read; the several hundred per-channel lines are elided)
+ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Connected all rings
+(all eight ranks report "Connected all rings")
+(tree wiring follows in the reverse direction, e.g. 7[7] -> 6[6], 3[3] -> 2[2], 2[2] -> 1[1], again via P2P/CUMEM/read on each channel; per-channel lines elided)
+ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:967823:969406 [6] NCCL INFO ncclCommInitRank comm 0x563e7cb79aa0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967824:969393 [7] NCCL INFO ncclCommInitRank comm 0x55ea0105f340 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967821:969404 [4] NCCL INFO ncclCommInitRank comm 0x55e1a5d8ecd0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967820:969405 [3] NCCL INFO ncclCommInitRank comm 0x559c71521f60 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:967818:969407 [1] NCCL INFO ncclCommInitRank comm 0x55d504dded80 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967817:969390 [0] NCCL INFO ncclCommInitRank comm 0x55c2f612e1a0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:967819:969394 [2] NCCL INFO ncclCommInitRank comm 0x5593349213d0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x98947c8e3644c0f4 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:967822:969425 [5] NCCL INFO ncclCommInitRank comm 0x5587d7724bc0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x98947c8e3644c0f4 - Init COMPLETE +[2025-10-13 18:39:30,520] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 
'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 
'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 
'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 
'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 18:39:32,318] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
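Note on the SupermaskLinearSparsity_SoftForward_Normal modules in the model dump below: the `scores` tensors flagged as newly initialized above are the trainable mask parameters of this repo's mask-tuning code, which is not itself shown in this log. What follows is a minimal sketch of a soft-forward supermask linear layer, assuming the usual formulation (frozen pretrained weights gated elementwise by sigmoid(scores / temperature)); the temperature 1.3 and the constant score init of 5.0 are taken from this log, while the class name, argument names, and everything else are illustrative assumptions, not the repo's actual API.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftSupermaskLinear(nn.Linear):
        # Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal:
        # the pretrained weight/bias are frozen and only `scores` trains,
        # consistent with the "Trainable Parameters" listing later in this log.
        def __init__(self, in_features, out_features, bias=True,
                     temperature=1.3, init_score=5.0):
            super().__init__(in_features, out_features, bias=bias)
            self.weight.requires_grad_(False)
            if self.bias is not None:
                self.bias.requires_grad_(False)
            self.temperature = temperature
            # One score per weight entry, initialized to a constant 5.0 so the
            # soft mask starts nearly all-on: sigmoid(5.0 / 1.3) ~ 0.98
            # (cf. the "Pre-training init ... Mean=5.000000" lines below).
            self.scores = nn.Parameter(torch.full_like(self.weight, init_score))

        def forward(self, x):
            mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
            return F.linear(x, self.weight * mask, self.bias)

Under this reading, each `scores` tensor has one entry per weight, so the counts reported later follow as in_features x out_features: 896*896 = 802816 for q_proj/o_proj, 896*128 = 114688 for k_proj/v_proj, 896*4864 = 4358144 for each MLP projection, plus 1152*896 = 1032192 and 896*896 = 802816 for the two connector layers. Then 24 x (2*802816 + 2*114688 + 3*4358144) + 1032192 + 802816 = 359661568, matching the logged total of trainable parameters.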
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000
+Pre-training init connector._connector.0.scores: Mean=5.000005
+Pre-training init connector._connector.2.scores: Mean=4.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-13 18:39:50,391 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-13 18:39:50,399 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%|          | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL
INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 
[7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
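The per-module `scores` counts logged above are exactly the weight-matrix shapes of the masked layers, so they can be checked against the dimensions in the TinyLlavaConfig printed at the start of the log (hidden size 896, MLP intermediate size 4864, 128-dim key/value projections, 1152-dim vision features). A minimal sketch that reproduces the logged numbers; it is illustrative and not part of the training code:

```python
# Sanity-check of the per-module mask-score counts in the listing above.
# Dimensions come from the TinyLlavaConfig at the top of the log.
hidden, inter, kv, vis = 896, 4864, 128, 1152

expected = {
    "self_attn.q_proj.scores": hidden * hidden,        # 802816
    "self_attn.k_proj.scores": hidden * kv,            # 114688
    "self_attn.v_proj.scores": hidden * kv,            # 114688
    "self_attn.o_proj.scores": hidden * hidden,        # 802816
    "mlp.gate_proj.scores": hidden * inter,            # 4358144
    "mlp.up_proj.scores": hidden * inter,              # 4358144
    "mlp.down_proj.scores": inter * hidden,            # 4358144
    "connector._connector.0.scores": vis * hidden,     # 1032192
    "connector._connector.2.scores": hidden * hidden,  # 802816
}
for name, count in expected.items():
    print(f"{name}: {count} parameters")
```

Every value matches the listing, confirming that one mask score is kept per weight of each masked linear layer.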
+ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
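The NCCL INFO lines in this stretch record communicator setup across the 8 local GPUs: 24 ring channels, per-rank tree topologies, and P2P/CUMEM transports between neighboring devices. Output of this kind appears when NCCL debug logging is enabled (typically `NCCL_DEBUG=INFO`). A minimal sketch, assuming a `torchrun` launch, of the kind of initialization that triggers these logs; the script name is hypothetical:

```python
# Launch with: NCCL_DEBUG=INFO torchrun --nproc_per_node=8 init_demo.py
import os

import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun
torch.cuda.set_device(local_rank)
dist.init_process_group(backend="nccl")  # communicator setup is logged here

# A first collective forces ring/tree channel construction on all ranks.
t = torch.ones(1, device="cuda")
dist.all_reduce(t)
print(f"rank {dist.get_rank()}/{dist.get_world_size()}: {t.item()}")
dist.destroy_process_group()
```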
+ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
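A few steps into the run below, DeepSpeed's ZeRO stage-3 optimizer warns about PyTorch allocator cache flushes under memory pressure and itself suggests adding `get_accelerator().empty_cache()` calls so that all ranks flush at the same time. A minimal sketch of that call; the tensor work here only simulates memory pressure, and where the call sits in the actual training loop is an assumption:

```python
# Illustrative use of the remedy named in the stage3.py warning below.
# In a real run this would go at the same point of each training step
# on every rank.
import torch
from deepspeed.accelerator import get_accelerator

buf = torch.empty(256, 1024, 1024, device="cuda")  # ~1 GiB of activations
del buf
get_accelerator().empty_cache()  # flush the allocator cache on this rank
print(f"allocated: {torch.cuda.memory_allocated() / 2**20:.1f} MiB")
```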
+ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO Connected all trees 
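The per-step learning rates printed in the training log that follows trace a linear warmup to the 0.2 peak over the first 16 of 520 steps, then cosine decay over the remaining 504. A short sketch that reproduces the logged values; the schedule is inferred from the numbers themselves (e.g. step 17 → 0.1999980572931538, step 44 → 0.19848077530122082), not read from the training script:

```python
# Reconstruction of the LR schedule implied by the step logs below:
# linear warmup for 16 steps to peak 0.2, then cosine decay to 0.
import math

PEAK, WARMUP, TOTAL = 0.2, 16, 520

def lr_at(step: int) -> float:
    if step <= WARMUP:
        return PEAK * step / WARMUP  # step 1 -> 0.0125, step 16 -> 0.2
    progress = (step - WARMUP) / (TOTAL - WARMUP)
    return 0.5 * PEAK * (1.0 + math.cos(math.pi * progress))

for s in (1, 16, 17, 18, 44):
    print(s, lr_at(s))
```

Under these assumptions the function matches every learning rate reported below to full printed precision.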
+ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:967818:974392 [1] NCCL INFO ncclCommInitRank comm 0x7ff26806a850 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x285080b8c88bb8e7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967820:974393 [3] NCCL INFO ncclCommInitRank comm 0x7f855406b250 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x285080b8c88bb8e7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967822:974390 [5] NCCL INFO ncclCommInitRank comm 0x7f3f8406aea0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x285080b8c88bb8e7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967824:974387 [7] NCCL INFO ncclCommInitRank comm 0x7f41b006a7e0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x285080b8c88bb8e7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967821:974391 [4] NCCL INFO ncclCommInitRank comm 0x7fe8fc06ada0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x285080b8c88bb8e7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967817:974386 [0] NCCL INFO ncclCommInitRank comm 0x7f655406ad00 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x285080b8c88bb8e7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:967819:974388 [2] NCCL INFO ncclCommInitRank comm 0x7fb31c06a4a0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x285080b8c88bb8e7 - Init 
COMPLETE +ywang29-vrdb-test1-worker-0:967823:974389 [6] NCCL INFO ncclCommInitRank comm 0x7f6bc406af00 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x285080b8c88bb8e7 - Init COMPLETE + 0%| | 1/520 [00:14<2:03:12, 14.24s/it] {'loss': 2.0564, 'grad_norm': 0.013505465650109775, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:03:12, 14.24s/it] 0%| | 2/520 [00:18<1:10:12, 8.13s/it] {'loss': 2.0612, 'grad_norm': 0.014577866376714198, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:10:12, 8.13s/it] 1%| | 3/520 [00:21<52:29, 6.09s/it] {'loss': 2.2037, 'grad_norm': 0.016816242090830644, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<52:29, 6.09s/it] 1%| | 4/520 [00:25<44:03, 5.12s/it] {'loss': 1.6794, 'grad_norm': 0.005125083939587982, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<44:03, 5.12s/it] 1%| | 5/520 [00:29<39:33, 4.61s/it] {'loss': 1.7002, 'grad_norm': 0.004457956710437221, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<39:33, 4.61s/it] 1%| | 6/520 [00:32<36:43, 4.29s/it] {'loss': 1.395, 'grad_norm': 0.0018505970706189981, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:32<36:43, 4.29s/it] 1%|▏ | 7/520 [00:36<34:50, 4.07s/it] {'loss': 1.4616, 'grad_norm': 0.0023479765447880266, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<34:50, 4.07s/it] 2%|▏ | 8/520 [00:40<35:36, 4.17s/it] {'loss': 1.4964, 'grad_norm': 0.0020368313695856475, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:40<35:36, 4.17s/it] 2%|▏ | 9/520 [00:45<35:59, 4.23s/it] {'loss': 1.533, 'grad_norm': 0.001489300344888045, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<35:59, 4.23s/it] 2%|▏ | 10/520 [00:48<34:23, 4.05s/it] {'loss': 1.3813, 'grad_norm': 0.0017422446975484604, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:48<34:23, 4.05s/it] 2%|▏ | 11/520 [00:52<33:39, 3.97s/it] {'loss': 1.4435, 'grad_norm': 0.001337143919070641, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:52<33:39, 3.97s/it] 2%|▏ | 12/520 [00:56<32:45, 3.87s/it] {'loss': 1.3381, 'grad_norm': 0.0015215697042600305, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:56<32:45, 3.87s/it][2025-10-13 18:40:55,210] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:00<33:56, 4.02s/it] {'loss': 1.3784, 'grad_norm': 0.001551850192076927, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:00<33:56, 4.02s/it] 3%|▎ | 14/520 [01:04<32:56, 3.91s/it] {'loss': 1.4295, 'grad_norm': 0.00165199179158431, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:04<32:56, 3.91s/it] 3%|▎ | 15/520 [01:07<32:15, 3.83s/it] {'loss': 1.3772, 'grad_norm': 0.0010965181720025612, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:07<32:15, 3.83s/it] 3%|▎ | 16/520 [01:11<31:45, 3.78s/it] {'loss': 1.3395, 'grad_norm': 0.0012680159060255315, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:11<31:45, 3.78s/it] 3%|▎ | 17/520 [01:15<31:24, 3.75s/it] {'loss': 1.4539, 'grad_norm': 0.0014621571798134475, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:15<31:24, 3.75s/it] 3%|▎ | 18/520 [01:18<31:10, 3.73s/it] {'loss': 1.3075, 'grad_norm': 0.0014339966079200792, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:18<31:10, 3.73s/it] 4%|▎ | 19/520 [01:22<30:58, 3.71s/it] {'loss': 1.3357, 'grad_norm': 0.0011417861930854162, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:22<30:58, 3.71s/it] 4%|▍ | 20/520 [01:26<30:48, 3.70s/it] {'loss': 1.2866, 'grad_norm': 0.0012057941464488759, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:26<30:48, 3.70s/it] 4%|▍ | 21/520 [01:29<30:50, 3.71s/it] {'loss': 1.3386, 'grad_norm': 0.001614020765170485, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:29<30:50, 3.71s/it] 4%|▍ | 22/520 [01:33<30:42, 3.70s/it] {'loss': 1.4366, 'grad_norm': 0.0011675377429824839, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:33<30:42, 3.70s/it] 4%|▍ | 23/520 [01:37<30:39, 3.70s/it] {'loss': 1.3892, 'grad_norm': 0.001188375428268322, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:37<30:39, 3.70s/it] 5%|▍ | 24/520 [01:41<30:32, 3.70s/it] {'loss': 1.3115, 'grad_norm': 0.0010566290108251007, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:41<30:32, 3.70s/it] 5%|▍ | 25/520 [01:44<30:24, 3.69s/it] {'loss': 1.3754, 'grad_norm': 0.0011473103830526197, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:44<30:24, 3.69s/it] 5%|▌ | 26/520 [01:48<30:17, 3.68s/it] {'loss': 1.3428, 'grad_norm': 0.0010926161274659256, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:48<30:17, 3.68s/it] 5%|▌ | 27/520 [01:51<30:08, 3.67s/it] {'loss': 1.2687, 'grad_norm': 0.0010517349235635357, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:51<30:08, 3.67s/it] 5%|▌ | 28/520 [01:55<30:04, 3.67s/it] {'loss': 1.2825, 'grad_norm': 0.001016061395572246, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:55<30:04, 3.67s/it] 6%|▌ | 29/520 [01:59<30:03, 3.67s/it] {'loss': 1.3103, 'grad_norm': 0.0010905955645344372, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [01:59<30:03, 3.67s/it] 6%|▌ | 30/520 [02:03<30:06, 3.69s/it] {'loss': 1.3862, 'grad_norm': 0.0010290475599853087, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:03<30:06, 3.69s/it] 6%|▌ | 31/520 [02:06<29:56, 3.67s/it] {'loss': 1.2828, 'grad_norm': 0.0010193364269628004, 
'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:06<29:56, 3.67s/it] 6%|▌ | 32/520 [02:10<29:56, 3.68s/it] {'loss': 1.2235, 'grad_norm': 0.000931003621535655, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:10<29:56, 3.68s/it] 6%|▋ | 33/520 [02:14<29:51, 3.68s/it] {'loss': 1.2821, 'grad_norm': 0.0010933109813937266, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:14<29:51, 3.68s/it] 7%|▋ | 34/520 [02:17<29:45, 3.67s/it] {'loss': 1.2777, 'grad_norm': 0.0011436396996279189, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:17<29:45, 3.67s/it] 7%|▋ | 35/520 [02:21<29:40, 3.67s/it] {'loss': 1.2777, 'grad_norm': 0.0012138090547376063, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:21<29:40, 3.67s/it] 7%|▋ | 36/520 [02:25<29:34, 3.67s/it] {'loss': 1.3716, 'grad_norm': 0.0009572571963619636, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:25<29:34, 3.67s/it] 7%|▋ | 37/520 [02:28<29:27, 3.66s/it] {'loss': 1.3613, 'grad_norm': 0.0009011670612313195, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:28<29:27, 3.66s/it] 7%|▋ | 38/520 [02:32<29:21, 3.66s/it] {'loss': 1.4471, 'grad_norm': 0.0009857915568303168, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:32<29:21, 3.66s/it] 8%|▊ | 39/520 [02:35<29:15, 3.65s/it] {'loss': 1.3058, 'grad_norm': 0.0011610431283540273, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:35<29:15, 3.65s/it] 8%|▊ | 40/520 [02:39<29:13, 3.65s/it] {'loss': 1.3372, 'grad_norm': 0.0009826063989245575, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:39<29:13, 3.65s/it] 8%|▊ | 41/520 [02:43<29:10, 3.65s/it] {'loss': 1.3149, 'grad_norm': 0.0009779150939319974, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:43<29:10, 3.65s/it] 8%|▊ | 42/520 [02:46<29:03, 3.65s/it] {'loss': 1.3141, 'grad_norm': 0.0012588239769890743, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:46<29:03, 3.65s/it] 8%|▊ | 43/520 [02:50<29:07, 3.66s/it] {'loss': 1.2552, 'grad_norm': 0.000962313684243939, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:50<29:07, 3.66s/it] 8%|▊ | 44/520 [02:54<29:02, 3.66s/it] {'loss': 1.3541, 'grad_norm': 0.0010299805666515493, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:54<29:02, 3.66s/it] 9%|▊ | 45/520 [02:57<29:00, 3.66s/it] {'loss': 1.3354, 'grad_norm': 0.0010539726611957674, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:57<29:00, 3.66s/it] 9%|▉ | 46/520 [03:01<29:06, 3.68s/it] {'loss': 1.3995, 'grad_norm': 0.0010370436354797778, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:01<29:06, 3.68s/it] 9%|▉ | 47/520 [03:05<29:01, 3.68s/it] {'loss': 1.3117, 'grad_norm': 0.0010387506865769832, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<29:01, 3.68s/it] 9%|▉ | 48/520 [03:09<28:54, 3.68s/it] {'loss': 1.3, 'grad_norm': 0.0011862971005250267, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<28:54, 3.68s/it] 9%|▉ | 49/520 [03:12<28:53, 3.68s/it] {'loss': 1.3422, 'grad_norm': 0.0011219676388486536, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:12<28:53, 3.68s/it] 10%|▉ | 50/520 [03:16<28:49, 3.68s/it] {'loss': 1.3301, 'grad_norm': 0.0009666692273870511, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 
50/520 [03:16<28:49, 3.68s/it] 10%|▉ | 51/520 [03:20<28:43, 3.68s/it] {'loss': 1.2686, 'grad_norm': 0.0011853802185180478, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:20<28:43, 3.68s/it] 10%|█ | 52/520 [03:23<28:36, 3.67s/it] {'loss': 1.3985, 'grad_norm': 0.0012006213134254271, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:23<28:36, 3.67s/it] 10%|█ | 53/520 [03:27<28:28, 3.66s/it] {'loss': 1.3744, 'grad_norm': 0.001149126880988097, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:27<28:28, 3.66s/it] 10%|█ | 54/520 [03:30<28:23, 3.66s/it] {'loss': 1.3058, 'grad_norm': 0.0010014746192257805, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<28:23, 3.66s/it] 11%|█ | 55/520 [03:34<28:20, 3.66s/it] {'loss': 1.2738, 'grad_norm': 0.0011550726664443507, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:34<28:20, 3.66s/it] 11%|█ | 56/520 [03:38<28:23, 3.67s/it] {'loss': 1.3992, 'grad_norm': 0.001096588210905573, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:38<28:23, 3.67s/it] 11%|█ | 57/520 [03:42<28:22, 3.68s/it] {'loss': 1.2578, 'grad_norm': 0.0011978734674216334, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<28:22, 3.68s/it] 11%|█ | 58/520 [03:45<28:21, 3.68s/it] {'loss': 1.4193, 'grad_norm': 0.0008865581471522937, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:45<28:21, 3.68s/it] 11%|█▏ | 59/520 [03:49<28:21, 3.69s/it] {'loss': 1.2337, 'grad_norm': 0.0010100094850519162, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:49<28:21, 3.69s/it] 12%|█▏ | 60/520 [03:53<28:29, 3.72s/it] {'loss': 1.3229, 'grad_norm': 0.0009809011813364833, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:53<28:29, 3.72s/it] 12%|█▏ | 61/520 [03:57<28:33, 3.73s/it] {'loss': 1.3137, 'grad_norm': 0.0010396829868412575, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:57<28:33, 3.73s/it] 12%|█▏ | 62/520 [04:00<28:24, 3.72s/it] {'loss': 1.3105, 'grad_norm': 0.0011164563490080185, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:00<28:24, 3.72s/it] 12%|█▏ | 63/520 [04:04<28:10, 3.70s/it] {'loss': 1.2975, 'grad_norm': 0.000969459861238582, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:04<28:10, 3.70s/it] 12%|█▏ | 64/520 [04:08<28:06, 3.70s/it] {'loss': 1.3274, 'grad_norm': 0.0010424155273887515, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:08<28:06, 3.70s/it] 12%|█▎ | 65/520 [04:11<28:02, 3.70s/it] {'loss': 1.3316, 'grad_norm': 0.0011744335871161154, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:11<28:02, 3.70s/it] 13%|█▎ | 66/520 [04:15<27:53, 3.69s/it] {'loss': 1.2866, 'grad_norm': 0.0010732766012672864, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:15<27:53, 3.69s/it] 13%|█▎ | 67/520 [04:19<27:44, 3.67s/it] {'loss': 1.1955, 'grad_norm': 0.0010383166823071516, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:19<27:44, 3.67s/it] 13%|█▎ | 68/520 [04:22<27:46, 3.69s/it] {'loss': 1.259, 'grad_norm': 0.0009906919934726296, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:22<27:46, 3.69s/it] 13%|█▎ | 69/520 [04:26<27:37, 3.68s/it] {'loss': 1.2463, 'grad_norm': 0.0014240461452889137, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 
[04:26<27:37, 3.68s/it] 13%|█▎ | 70/520 [04:30<27:32, 3.67s/it] {'loss': 1.2705, 'grad_norm': 0.0011969042897294847, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:30<27:32, 3.67s/it] 14%|█▎ | 71/520 [04:33<27:22, 3.66s/it] {'loss': 1.2134, 'grad_norm': 0.0009470682386547593, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:33<27:22, 3.66s/it] 14%|█▍ | 72/520 [04:37<27:17, 3.66s/it] {'loss': 1.361, 'grad_norm': 0.001119947933494737, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:37<27:17, 3.66s/it] 14%|█▍ | 73/520 [04:41<27:18, 3.67s/it] {'loss': 1.1954, 'grad_norm': 0.0010511380959632494, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:41<27:18, 3.67s/it] 14%|█▍ | 74/520 [04:44<27:17, 3.67s/it] {'loss': 1.3017, 'grad_norm': 0.0010708762668510414, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:44<27:17, 3.67s/it] 14%|█▍ | 75/520 [04:48<27:09, 3.66s/it] {'loss': 1.2174, 'grad_norm': 0.000960918306212466, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:48<27:09, 3.66s/it] 15%|█▍ | 76/520 [04:52<27:07, 3.66s/it] {'loss': 1.356, 'grad_norm': 0.000909600582553652, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:52<27:07, 3.66s/it] 15%|█▍ | 77/520 [04:55<27:03, 3.67s/it] {'loss': 1.1396, 'grad_norm': 0.001145177928591822, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:55<27:03, 3.67s/it] 15%|█▌ | 78/520 [04:59<26:58, 3.66s/it] {'loss': 1.2497, 'grad_norm': 0.0010560062084845777, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [04:59<26:58, 3.66s/it] 15%|█▌ | 79/520 [05:03<26:56, 3.66s/it] {'loss': 1.2438, 'grad_norm': 0.0010587556852241621, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:03<26:56, 3.66s/it] 15%|█▌ | 80/520 [05:06<26:54, 3.67s/it] {'loss': 1.3406, 'grad_norm': 0.0011081552931313586, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:06<26:54, 3.67s/it] 16%|█▌ | 81/520 [05:10<26:50, 3.67s/it] {'loss': 1.3849, 'grad_norm': 0.0015691736914616409, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:10<26:50, 3.67s/it] 16%|█▌ | 82/520 [05:14<26:44, 3.66s/it] {'loss': 1.3075, 'grad_norm': 0.0010506496169048272, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:14<26:44, 3.66s/it] 16%|█▌ | 83/520 [05:17<26:39, 3.66s/it] {'loss': 1.324, 'grad_norm': 0.0011730951097349669, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:17<26:39, 3.66s/it] 16%|█▌ | 84/520 [05:21<26:37, 3.66s/it] {'loss': 1.3355, 'grad_norm': 0.001155766276170746, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:21<26:37, 3.66s/it] 16%|█▋ | 85/520 [05:25<26:33, 3.66s/it] {'loss': 1.3784, 'grad_norm': 0.0011507990912218349, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:25<26:33, 3.66s/it] 17%|█▋ | 86/520 [05:28<26:33, 3.67s/it] {'loss': 1.3665, 'grad_norm': 0.0010367778402751867, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:28<26:33, 3.67s/it] 17%|█▋ | 87/520 [05:32<26:31, 3.68s/it] {'loss': 1.288, 'grad_norm': 0.0009420945797432727, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:32<26:31, 3.68s/it] 17%|█▋ | 88/520 [05:36<26:31, 3.68s/it] {'loss': 1.234, 'grad_norm': 0.0008205513779817225, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 
88/520 [05:36<26:31, 3.68s/it] 17%|█▋ | 89/520 [05:39<26:29, 3.69s/it] {'loss': 1.3227, 'grad_norm': 0.0010656130305110407, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:39<26:29, 3.69s/it] 17%|█▋ | 90/520 [05:43<26:26, 3.69s/it] {'loss': 1.2573, 'grad_norm': 0.0009916552657062132, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:43<26:26, 3.69s/it] 18%|█▊ | 91/520 [05:47<26:21, 3.69s/it] {'loss': 1.3302, 'grad_norm': 0.0009836114560322696, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:47<26:21, 3.69s/it] 18%|█▊ | 92/520 [05:50<26:15, 3.68s/it] {'loss': 1.2763, 'grad_norm': 0.0010959576912247487, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:50<26:15, 3.68s/it] 18%|█▊ | 93/520 [05:54<26:11, 3.68s/it] {'loss': 1.2839, 'grad_norm': 0.0011741761131337507, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:54<26:11, 3.68s/it] 18%|█▊ | 94/520 [05:58<26:12, 3.69s/it] {'loss': 1.3564, 'grad_norm': 0.0010153247072201348, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [05:58<26:12, 3.69s/it] 18%|█▊ | 95/520 [06:02<26:20, 3.72s/it] {'loss': 1.2626, 'grad_norm': 0.0012814689200686567, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:02<26:20, 3.72s/it] 18%|█▊ | 96/520 [06:05<26:34, 3.76s/it] {'loss': 1.2752, 'grad_norm': 0.0008602828030959271, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:05<26:34, 3.76s/it] 19%|█▊ | 97/520 [06:09<26:42, 3.79s/it] {'loss': 1.2423, 'grad_norm': 0.0013722023798756276, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:09<26:42, 3.79s/it] 19%|█▉ | 98/520 [06:13<27:13, 3.87s/it] {'loss': 1.2491, 'grad_norm': 0.0009623812671295942, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:13<27:13, 3.87s/it] 19%|█▉ | 99/520 [06:17<27:47, 3.96s/it] {'loss': 1.255, 'grad_norm': 0.0011483201316733877, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:17<27:47, 3.96s/it] 19%|█▉ | 100/520 [06:22<27:55, 3.99s/it] {'loss': 1.2298, 'grad_norm': 0.0009209105388669315, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:22<27:55, 3.99s/it] 19%|█▉ | 101/520 [06:25<27:28, 3.93s/it] {'loss': 1.2647, 'grad_norm': 0.001065755857688162, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:25<27:28, 3.93s/it] 20%|█▉ | 102/520 [06:29<27:09, 3.90s/it] {'loss': 1.2711, 'grad_norm': 0.0010739890071377659, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:29<27:09, 3.90s/it] 20%|█▉ | 103/520 [06:33<27:01, 3.89s/it] {'loss': 1.198, 'grad_norm': 0.0009875221969867715, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:33<27:01, 3.89s/it] 20%|██ | 104/520 [06:37<26:54, 3.88s/it] {'loss': 1.2741, 'grad_norm': 0.00102987582799406, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:37<26:54, 3.88s/it] 20%|██ | 105/520 [06:41<26:36, 3.85s/it] {'loss': 1.2665, 'grad_norm': 0.0009589338999282769, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:41<26:36, 3.85s/it] 20%|██ | 106/520 [06:44<26:07, 3.79s/it] {'loss': 1.2635, 'grad_norm': 0.0009204340396146165, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:44<26:07, 3.79s/it] 21%|██ | 107/520 [06:48<25:46, 3.74s/it] {'loss': 1.2407, 'grad_norm': 0.000984565156424566, 'learning_rate': 0.18433914458128858, 
'epoch': 0.21} + 21%|██ | 107/520 [06:48<25:46, 3.74s/it] 21%|██ | 108/520 [06:52<25:34, 3.72s/it] {'loss': 1.2265, 'grad_norm': 0.001069309749034078, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:52<25:34, 3.72s/it] 21%|██ | 109/520 [06:55<25:22, 3.70s/it] {'loss': 1.2152, 'grad_norm': 0.0008715028389095948, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:55<25:22, 3.70s/it] 21%|██ | 110/520 [06:59<25:09, 3.68s/it] {'loss': 1.4055, 'grad_norm': 0.0010744513786503473, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [06:59<25:09, 3.68s/it] 21%|██▏ | 111/520 [07:03<25:03, 3.68s/it] {'loss': 1.4007, 'grad_norm': 0.0010447160968648525, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:03<25:03, 3.68s/it] 22%|██▏ | 112/520 [07:06<25:01, 3.68s/it] {'loss': 1.2844, 'grad_norm': 0.0009673712455368104, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:06<25:01, 3.68s/it] 22%|██▏ | 113/520 [07:10<25:00, 3.69s/it] {'loss': 1.1828, 'grad_norm': 0.0009263895777106644, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:10<25:00, 3.69s/it] 22%|██▏ | 114/520 [07:14<24:49, 3.67s/it] {'loss': 1.2764, 'grad_norm': 0.0009614281711212883, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:14<24:49, 3.67s/it] 22%|██▏ | 115/520 [07:17<25:06, 3.72s/it] {'loss': 1.3712, 'grad_norm': 0.0010172553487982974, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:17<25:06, 3.72s/it] 22%|██▏ | 116/520 [07:22<25:52, 3.84s/it] {'loss': 1.3845, 'grad_norm': 0.0009775232317026973, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:22<25:52, 3.84s/it] 22%|██▎ | 117/520 [07:26<26:32, 3.95s/it] {'loss': 1.3471, 'grad_norm': 0.0011555374436369608, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:26<26:32, 3.95s/it] 23%|██▎ | 118/520 [07:30<26:53, 4.01s/it] {'loss': 1.2706, 'grad_norm': 0.0009460672046867936, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:30<26:53, 4.01s/it] 23%|██▎ | 119/520 [07:34<26:35, 3.98s/it] {'loss': 1.2258, 'grad_norm': 0.0010358018207812, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:34<26:35, 3.98s/it] 23%|██▎ | 120/520 [07:38<26:21, 3.95s/it] {'loss': 1.2348, 'grad_norm': 0.0016633077248294228, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:38<26:21, 3.95s/it] 23%|██▎ | 121/520 [07:42<26:07, 3.93s/it] {'loss': 1.2883, 'grad_norm': 0.0013871948086231804, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:42<26:07, 3.93s/it] 23%|██▎ | 122/520 [07:45<25:59, 3.92s/it] {'loss': 1.1982, 'grad_norm': 0.0009893999543444513, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:45<25:59, 3.92s/it] 24%|██▎ | 123/520 [07:49<25:52, 3.91s/it] {'loss': 1.3072, 'grad_norm': 0.0011142882082262825, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:49<25:52, 3.91s/it] 24%|██▍ | 124/520 [07:53<25:44, 3.90s/it] {'loss': 1.2558, 'grad_norm': 0.0010381963559076503, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:53<25:44, 3.90s/it] 24%|██▍ | 125/520 [07:57<25:38, 3.89s/it] {'loss': 1.2519, 'grad_norm': 0.0009758648502108945, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:57<25:38, 3.89s/it] 24%|██▍ | 126/520 [08:02<27:00, 4.11s/it] {'loss': 
1.2369, 'grad_norm': 0.0008866308681652045, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:02<27:00, 4.11s/it] 24%|██▍ | 127/520 [08:06<26:28, 4.04s/it] {'loss': 1.2303, 'grad_norm': 0.0011975657766295884, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:06<26:28, 4.04s/it] 25%|██▍ | 128/520 [08:09<26:06, 4.00s/it] {'loss': 1.2717, 'grad_norm': 0.0011401965422691135, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:09<26:06, 4.00s/it] 25%|██▍ | 129/520 [08:13<25:51, 3.97s/it] {'loss': 1.2406, 'grad_norm': 0.0010682027787159685, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:13<25:51, 3.97s/it] 25%|██▌ | 130/520 [08:17<25:24, 3.91s/it] {'loss': 1.2661, 'grad_norm': 0.0009048512327961321, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:17<25:24, 3.91s/it] 25%|██▌ | 131/520 [08:21<24:54, 3.84s/it] {'loss': 1.2031, 'grad_norm': 0.0008827882862319326, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:21<24:54, 3.84s/it] 25%|██▌ | 132/520 [08:24<24:27, 3.78s/it] {'loss': 1.3102, 'grad_norm': 0.0010766461782652362, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:25<24:27, 3.78s/it] 26%|██▌ | 133/520 [08:28<24:29, 3.80s/it] {'loss': 1.2344, 'grad_norm': 0.0010668102302938067, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:28<24:29, 3.80s/it] 26%|██▌ | 134/520 [08:32<24:39, 3.83s/it] {'loss': 1.3073, 'grad_norm': 0.0009956229718168224, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:32<24:39, 3.83s/it] 26%|██▌ | 135/520 [08:36<24:28, 3.81s/it] {'loss': 1.357, 'grad_norm': 0.000976882820635974, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:36<24:28, 3.81s/it] 26%|██▌ | 136/520 [08:40<24:06, 3.77s/it] {'loss': 1.308, 'grad_norm': 0.0009857511087755823, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:40<24:06, 3.77s/it] 26%|██▋ | 137/520 [08:43<23:48, 3.73s/it] {'loss': 1.2216, 'grad_norm': 0.0012398261679090199, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:43<23:48, 3.73s/it] 27%|██▋ | 138/520 [08:47<23:35, 3.71s/it] {'loss': 1.2372, 'grad_norm': 0.000909519188500034, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:47<23:35, 3.71s/it] 27%|██▋ | 139/520 [08:51<23:26, 3.69s/it] {'loss': 1.1153, 'grad_norm': 0.0010271383085458244, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:51<23:26, 3.69s/it] 27%|██▋ | 140/520 [08:54<23:18, 3.68s/it] {'loss': 1.2546, 'grad_norm': 0.0009082486849981449, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:54<23:18, 3.68s/it] 27%|██▋ | 141/520 [08:58<23:08, 3.66s/it] {'loss': 1.3402, 'grad_norm': 0.0009318039815104679, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:58<23:08, 3.66s/it] 27%|██▋ | 142/520 [09:02<23:05, 3.66s/it] {'loss': 1.2657, 'grad_norm': 0.0009178029078416684, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:02<23:05, 3.66s/it] 28%|██▊ | 143/520 [09:05<22:59, 3.66s/it] {'loss': 1.2702, 'grad_norm': 0.0011501401735882035, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:05<22:59, 3.66s/it] 28%|██▊ | 144/520 [09:09<22:54, 3.65s/it] {'loss': 1.2387, 'grad_norm': 0.0010506990056964244, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 
144/520 [09:09<22:54, 3.65s/it] 28%|██▊ | 145/520 [09:13<23:00, 3.68s/it] {'loss': 1.1666, 'grad_norm': 0.0009337286281945915, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:13<23:00, 3.68s/it] 28%|██▊ | 146/520 [09:16<23:15, 3.73s/it] {'loss': 1.3191, 'grad_norm': 0.000950528065413526, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:16<23:15, 3.73s/it] 28%|██▊ | 147/520 [09:20<23:27, 3.77s/it] {'loss': 1.2124, 'grad_norm': 0.0010588749228321794, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:20<23:27, 3.77s/it] 28%|██▊ | 148/520 [09:24<23:12, 3.74s/it] {'loss': 1.2384, 'grad_norm': 0.0009269783519255525, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:24<23:12, 3.74s/it] 29%|██▊ | 149/520 [09:28<22:59, 3.72s/it] {'loss': 1.1786, 'grad_norm': 0.0009340071406802442, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:28<22:59, 3.72s/it] 29%|██▉ | 150/520 [09:31<22:50, 3.70s/it] {'loss': 1.4015, 'grad_norm': 0.0009869278077891726, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:31<22:50, 3.70s/it] 29%|██▉ | 151/520 [09:35<22:40, 3.69s/it] {'loss': 1.2283, 'grad_norm': 0.0009731641590668837, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:35<22:40, 3.69s/it] 29%|██▉ | 152/520 [09:39<22:34, 3.68s/it] {'loss': 1.201, 'grad_norm': 0.00099135972261879, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:39<22:34, 3.68s/it] 29%|██▉ | 153/520 [09:42<22:26, 3.67s/it] {'loss': 1.2371, 'grad_norm': 0.0009586733045477706, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:42<22:26, 3.67s/it] 30%|██▉ | 154/520 [09:46<22:22, 3.67s/it] {'loss': 1.3145, 'grad_norm': 0.0009248169144795478, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:46<22:22, 3.67s/it] 30%|██▉ | 155/520 [09:50<22:17, 3.66s/it] {'loss': 1.2323, 'grad_norm': 0.0009732546887857118, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:50<22:17, 3.66s/it] 30%|███ | 156/520 [09:53<22:18, 3.68s/it] {'loss': 1.2552, 'grad_norm': 0.001368949241931711, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:53<22:18, 3.68s/it] 30%|███ | 157/520 [09:57<22:27, 3.71s/it] {'loss': 1.3077, 'grad_norm': 0.0009079861786708349, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:57<22:27, 3.71s/it] 30%|███ | 158/520 [10:01<22:31, 3.73s/it] {'loss': 1.2358, 'grad_norm': 0.001005450151406939, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:01<22:31, 3.73s/it] 31%|███ | 159/520 [10:05<22:34, 3.75s/it] {'loss': 1.2804, 'grad_norm': 0.0009583534408845178, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:05<22:34, 3.75s/it] 31%|███ | 160/520 [10:08<22:38, 3.77s/it] {'loss': 1.2867, 'grad_norm': 0.0010229313716077575, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:09<22:38, 3.77s/it] 31%|███ | 161/520 [10:12<22:35, 3.77s/it] {'loss': 1.261, 'grad_norm': 0.0009476579032866118, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:12<22:35, 3.77s/it] 31%|███ | 162/520 [10:16<22:33, 3.78s/it] {'loss': 1.2502, 'grad_norm': 0.0009233853980992958, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:16<22:33, 3.78s/it] 31%|███▏ | 163/520 [10:20<22:31, 3.79s/it] {'loss': 1.1666, 'grad_norm': 
0.0011339578092371678, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:20<22:31, 3.79s/it] 32%|███▏ | 164/520 [10:24<22:28, 3.79s/it] {'loss': 1.1284, 'grad_norm': 0.0009051880987004089, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:24<22:28, 3.79s/it] 32%|███▏ | 165/520 [10:27<22:25, 3.79s/it] {'loss': 1.2756, 'grad_norm': 0.0009082956745302138, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:27<22:25, 3.79s/it] 32%|███▏ | 166/520 [10:31<22:13, 3.77s/it] {'loss': 1.2448, 'grad_norm': 0.0010360513217459663, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:31<22:13, 3.77s/it] 32%|███▏ | 167/520 [10:35<21:55, 3.73s/it] {'loss': 1.2385, 'grad_norm': 0.0009900447013953828, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:35<21:55, 3.73s/it] 32%|███▏ | 168/520 [10:38<21:42, 3.70s/it] {'loss': 1.1872, 'grad_norm': 0.0009282863847631813, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:38<21:42, 3.70s/it] 32%|███▎ | 169/520 [10:42<21:32, 3.68s/it] {'loss': 1.2579, 'grad_norm': 0.0009764756553733049, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:42<21:32, 3.68s/it] 33%|███▎ | 170/520 [10:46<21:26, 3.68s/it] {'loss': 1.2034, 'grad_norm': 0.0008985428467872114, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:46<21:26, 3.68s/it] 33%|███▎ | 171/520 [10:49<21:21, 3.67s/it] {'loss': 1.1939, 'grad_norm': 0.0009984888961391337, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:49<21:21, 3.67s/it] 33%|███▎ | 172/520 [10:53<21:12, 3.66s/it] {'loss': 1.2664, 'grad_norm': 0.0009218538270098547, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:53<21:12, 3.66s/it] 33%|███▎ | 173/520 [10:57<21:05, 3.65s/it] {'loss': 1.205, 'grad_norm': 0.0009046874169647116, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:57<21:05, 3.65s/it] 33%|███▎ | 174/520 [11:00<21:03, 3.65s/it] {'loss': 1.2561, 'grad_norm': 0.001017881393139599, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:00<21:03, 3.65s/it] 34%|███▎ | 175/520 [11:04<20:58, 3.65s/it] {'loss': 1.1751, 'grad_norm': 0.0008989344544138775, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:04<20:58, 3.65s/it] 34%|███▍ | 176/520 [11:08<20:52, 3.64s/it] {'loss': 1.2809, 'grad_norm': 0.0009611822708689744, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:08<20:52, 3.64s/it] 34%|███▍ | 177/520 [11:11<20:50, 3.64s/it] {'loss': 1.1619, 'grad_norm': 0.0010392210465408817, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:11<20:50, 3.64s/it] 34%|███▍ | 178/520 [11:15<20:45, 3.64s/it] {'loss': 1.2403, 'grad_norm': 0.001062839533300205, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:15<20:45, 3.64s/it] 34%|███▍ | 179/520 [11:19<20:42, 3.64s/it] {'loss': 1.315, 'grad_norm': 0.0009437616043092136, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:19<20:42, 3.64s/it] 35%|███▍ | 180/520 [11:22<20:43, 3.66s/it] {'loss': 1.2379, 'grad_norm': 0.0009810170167771472, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:22<20:43, 3.66s/it] 35%|███▍ | 181/520 [11:26<20:36, 3.65s/it] {'loss': 1.2113, 'grad_norm': 0.0008482347097156155, 'learning_rate': 0.15161062738075068, 'epoch': 
0.35} + 35%|███▍ | 181/520 [11:26<20:36, 3.65s/it] 35%|███▌ | 182/520 [11:29<20:32, 3.65s/it] {'loss': 1.2214, 'grad_norm': 0.000961463712430654, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:29<20:32, 3.65s/it] 35%|███▌ | 183/520 [11:33<20:27, 3.64s/it] {'loss': 1.2458, 'grad_norm': 0.0009253612432406382, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:33<20:27, 3.64s/it] 35%|███▌ | 184/520 [11:37<20:25, 3.65s/it] {'loss': 1.1803, 'grad_norm': 0.0009745999231659048, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:37<20:25, 3.65s/it] 36%|███▌ | 185/520 [11:40<20:20, 3.64s/it] {'loss': 1.3158, 'grad_norm': 0.0009709688211079367, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:40<20:20, 3.64s/it] 36%|███▌ | 186/520 [11:44<20:14, 3.63s/it] {'loss': 1.2044, 'grad_norm': 0.0009669922968031739, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:44<20:14, 3.63s/it] 36%|███▌ | 187/520 [11:48<20:08, 3.63s/it] {'loss': 1.2015, 'grad_norm': 0.0010556798179512954, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:48<20:08, 3.63s/it] 36%|███▌ | 188/520 [11:51<20:04, 3.63s/it] {'loss': 1.2876, 'grad_norm': 0.001009170251850843, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:51<20:04, 3.63s/it] 36%|███▋ | 189/520 [11:55<20:02, 3.63s/it] {'loss': 1.2911, 'grad_norm': 0.0008965052008750161, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:55<20:02, 3.63s/it] 37%|███▋ | 190/520 [11:59<20:00, 3.64s/it] {'loss': 1.2167, 'grad_norm': 0.0010095462552109694, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:59<20:00, 3.64s/it] 37%|███▋ | 191/520 [12:02<20:10, 3.68s/it] {'loss': 1.1748, 'grad_norm': 0.0008741367101456506, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:02<20:10, 3.68s/it] 37%|███▋ | 192/520 [12:06<20:24, 3.73s/it] {'loss': 1.2485, 'grad_norm': 0.0008532518273569921, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:06<20:24, 3.73s/it] 37%|███▋ | 193/520 [12:10<20:37, 3.78s/it] {'loss': 1.2064, 'grad_norm': 0.0010325029508946248, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:10<20:37, 3.78s/it] 37%|███▋ | 194/520 [12:14<20:29, 3.77s/it] {'loss': 1.1048, 'grad_norm': 0.0009102312452799904, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:14<20:29, 3.77s/it] 38%|███▊ | 195/520 [12:17<20:15, 3.74s/it] {'loss': 1.2736, 'grad_norm': 0.000950285364795647, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:17<20:15, 3.74s/it] 38%|███▊ | 196/520 [12:21<20:02, 3.71s/it] {'loss': 1.2472, 'grad_norm': 0.0010482123220189705, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:21<20:02, 3.71s/it] 38%|███▊ | 197/520 [12:25<19:54, 3.70s/it] {'loss': 1.194, 'grad_norm': 0.0009312958505172198, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:25<19:54, 3.70s/it] 38%|███▊ | 198/520 [12:28<19:46, 3.68s/it] {'loss': 1.2652, 'grad_norm': 0.001025085376612764, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:28<19:46, 3.68s/it] 38%|███▊ | 199/520 [12:32<19:41, 3.68s/it] {'loss': 1.1863, 'grad_norm': 0.000957649057526751, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:32<19:41, 3.68s/it] 38%|███▊ | 200/520 
[12:36<19:35, 3.67s/it] {'loss': 1.1581, 'grad_norm': 0.0009872217130524744, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:36<19:35, 3.67s/it] 39%|███▊ | 201/520 [12:39<19:30, 3.67s/it] {'loss': 1.1847, 'grad_norm': 0.0008309087896123169, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:39<19:30, 3.67s/it] 39%|███▉ | 202/520 [12:43<19:25, 3.67s/it] {'loss': 1.1886, 'grad_norm': 0.0009821061493752374, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:43<19:25, 3.67s/it] 39%|███▉ | 203/520 [12:47<19:18, 3.66s/it] {'loss': 1.2388, 'grad_norm': 0.0009803670558472034, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:47<19:18, 3.66s/it] 39%|███▉ | 204/520 [12:50<19:17, 3.66s/it] {'loss': 1.2523, 'grad_norm': 0.0009636834874711443, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:50<19:17, 3.66s/it] 39%|███▉ | 205/520 [12:54<19:17, 3.68s/it] {'loss': 1.181, 'grad_norm': 0.0008959025115105896, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:54<19:17, 3.68s/it] 40%|███▉ | 206/520 [12:58<19:12, 3.67s/it] {'loss': 1.2861, 'grad_norm': 0.0009289775456198224, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:58<19:12, 3.67s/it] 40%|███▉ | 207/520 [13:01<19:10, 3.68s/it] {'loss': 1.1514, 'grad_norm': 0.0008841031830533658, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:01<19:10, 3.68s/it] 40%|████ | 208/520 [13:05<19:06, 3.67s/it] {'loss': 1.2768, 'grad_norm': 0.0010203799700314552, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:05<19:06, 3.67s/it] 40%|████ | 209/520 [13:09<19:02, 3.67s/it] {'loss': 1.1907, 'grad_norm': 0.0009324374511816278, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:09<19:02, 3.67s/it] 40%|████ | 210/520 [13:12<18:57, 3.67s/it] {'loss': 1.2626, 'grad_norm': 0.0010406097320658696, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:12<18:57, 3.67s/it] 41%|████ | 211/520 [13:16<18:51, 3.66s/it] {'loss': 1.2635, 'grad_norm': 0.0009047301189561964, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:16<18:51, 3.66s/it] 41%|████ | 212/520 [13:20<18:52, 3.68s/it] {'loss': 1.258, 'grad_norm': 0.0009350071653715741, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:20<18:52, 3.68s/it] 41%|████ | 213/520 [13:24<18:49, 3.68s/it] {'loss': 1.2123, 'grad_norm': 0.0010660644885332824, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:24<18:49, 3.68s/it] 41%|████ | 214/520 [13:27<18:42, 3.67s/it] {'loss': 1.198, 'grad_norm': 0.0009443222009332257, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:27<18:42, 3.67s/it] 41%|████▏ | 215/520 [13:31<18:42, 3.68s/it] {'loss': 1.1167, 'grad_norm': 0.0008705328366143824, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:31<18:42, 3.68s/it] 42%|████▏ | 216/520 [13:35<18:44, 3.70s/it] {'loss': 1.1299, 'grad_norm': 0.0009778328329823566, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:35<18:44, 3.70s/it] 42%|████▏ | 217/520 [13:38<18:41, 3.70s/it] {'loss': 1.2489, 'grad_norm': 0.0010631490314985301, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:38<18:41, 3.70s/it] 42%|████▏ | 218/520 [13:42<18:32, 3.68s/it] {'loss': 1.2267, 'grad_norm': 
0.0010039564650998127, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:42<18:32, 3.68s/it] 42%|████▏ | 219/520 [13:46<18:23, 3.67s/it] {'loss': 1.2369, 'grad_norm': 0.0008709729155235728, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:46<18:23, 3.67s/it] 42%|████▏ | 220/520 [13:49<18:17, 3.66s/it] {'loss': 1.1646, 'grad_norm': 0.0008822386859693299, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:49<18:17, 3.66s/it] 42%|████▎ | 221/520 [13:53<18:13, 3.66s/it] {'loss': 1.2362, 'grad_norm': 0.0009274558825659965, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:53<18:13, 3.66s/it] 43%|████▎ | 222/520 [13:57<18:09, 3.65s/it] {'loss': 1.174, 'grad_norm': 0.000959526768199347, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:57<18:09, 3.65s/it] 43%|████▎ | 223/520 [14:00<18:05, 3.65s/it] {'loss': 1.1735, 'grad_norm': 0.00089689394959947, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:00<18:05, 3.65s/it] 43%|████▎ | 224/520 [14:04<18:01, 3.65s/it] {'loss': 1.2185, 'grad_norm': 0.0008312457051461342, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:04<18:01, 3.65s/it] 43%|████▎ | 225/520 [14:07<17:57, 3.65s/it] {'loss': 1.1736, 'grad_norm': 0.0009360072446305192, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:07<17:57, 3.65s/it] 43%|████▎ | 226/520 [14:11<17:55, 3.66s/it] {'loss': 1.2721, 'grad_norm': 0.0009046902826910493, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:11<17:55, 3.66s/it] 44%|████▎ | 227/520 [14:15<17:50, 3.65s/it] {'loss': 1.2602, 'grad_norm': 0.0009162333296717846, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:15<17:50, 3.65s/it] 44%|████▍ | 228/520 [14:18<17:48, 3.66s/it] {'loss': 1.2659, 'grad_norm': 0.0009397029134371347, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:18<17:48, 3.66s/it] 44%|████▍ | 229/520 [14:22<17:43, 3.66s/it] {'loss': 1.2327, 'grad_norm': 0.0008777269895219238, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:22<17:43, 3.66s/it] 44%|████▍ | 230/520 [14:26<17:42, 3.66s/it] {'loss': 1.1284, 'grad_norm': 0.0009159318202002505, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:26<17:42, 3.66s/it] 44%|████▍ | 231/520 [14:29<17:38, 3.66s/it] {'loss': 1.1903, 'grad_norm': 0.0009091069482665659, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:29<17:38, 3.66s/it] 45%|████▍ | 232/520 [14:33<17:35, 3.67s/it] {'loss': 1.3015, 'grad_norm': 0.0010029931015007297, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:33<17:35, 3.67s/it] 45%|████▍ | 233/520 [14:37<17:29, 3.66s/it] {'loss': 1.1922, 'grad_norm': 0.0010039790979858097, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:37<17:29, 3.66s/it] 45%|████▌ | 234/520 [14:40<17:29, 3.67s/it] {'loss': 1.1438, 'grad_norm': 0.0010581776608734174, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:40<17:29, 3.67s/it] 45%|████▌ | 235/520 [14:44<17:23, 3.66s/it] {'loss': 1.1958, 'grad_norm': 0.0010028851773323556, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:44<17:23, 3.66s/it] 45%|████▌ | 236/520 [14:48<17:17, 3.65s/it] {'loss': 1.2632, 'grad_norm': 0.0008854840791968963, 
'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:48<17:17, 3.65s/it] 46%|████▌ | 237/520 [14:51<17:14, 3.65s/it] {'loss': 1.2709, 'grad_norm': 0.0009622181313688621, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:51<17:14, 3.65s/it] 46%|████▌ | 238/520 [14:55<17:08, 3.65s/it] {'loss': 1.1998, 'grad_norm': 0.0009625652324012092, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [14:55<17:08, 3.65s/it] 46%|████▌ | 239/520 [14:59<17:02, 3.64s/it] {'loss': 1.2721, 'grad_norm': 0.0009622940696735979, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [14:59<17:02, 3.64s/it] 46%|████▌ | 240/520 [15:02<17:02, 3.65s/it] {'loss': 1.087, 'grad_norm': 0.0009099207403073185, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:02<17:02, 3.65s/it] 46%|████▋ | 241/520 [15:06<17:00, 3.66s/it] {'loss': 1.1714, 'grad_norm': 0.0008915342063752893, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:06<17:00, 3.66s/it] 47%|████▋ | 242/520 [15:10<17:05, 3.69s/it] {'loss': 1.1852, 'grad_norm': 0.0008678023562627713, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:10<17:05, 3.69s/it] 47%|████▋ | 243/520 [15:13<17:01, 3.69s/it] {'loss': 1.1817, 'grad_norm': 0.0009325998511767559, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:13<17:01, 3.69s/it] 47%|████▋ | 244/520 [15:17<16:49, 3.66s/it] {'loss': 1.2861, 'grad_norm': 0.0009405065471325948, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:17<16:49, 3.66s/it] 47%|████▋ | 245/520 [15:21<16:43, 3.65s/it] {'loss': 1.1606, 'grad_norm': 0.0009514994706460909, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:21<16:43, 3.65s/it] 47%|████▋ | 246/520 [15:24<16:39, 3.65s/it] {'loss': 1.2883, 'grad_norm': 0.000980222144662842, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:24<16:39, 3.65s/it] 48%|████▊ | 247/520 [15:28<16:33, 3.64s/it] {'loss': 1.3339, 'grad_norm': 0.0010016607267058022, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:28<16:33, 3.64s/it] 48%|████▊ | 248/520 [15:32<16:28, 3.63s/it] {'loss': 1.1598, 'grad_norm': 0.0009545859190134356, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:32<16:28, 3.63s/it] 48%|████▊ | 249/520 [15:35<16:22, 3.63s/it] {'loss': 1.2497, 'grad_norm': 0.0009306960060350917, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:35<16:22, 3.63s/it] 48%|████▊ | 250/520 [15:39<16:20, 3.63s/it] {'loss': 1.187, 'grad_norm': 0.0009702620891823761, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:39<16:20, 3.63s/it] 48%|████▊ | 251/520 [15:42<16:16, 3.63s/it] {'loss': 1.2541, 'grad_norm': 0.0008987600344519142, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:42<16:16, 3.63s/it] 48%|████▊ | 252/520 [15:46<16:11, 3.62s/it] {'loss': 1.1949, 'grad_norm': 0.0008814973860603672, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:46<16:11, 3.62s/it] 49%|████▊ | 253/520 [15:50<16:10, 3.64s/it] {'loss': 1.249, 'grad_norm': 0.0010864264987154864, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:50<16:10, 3.64s/it] 49%|████▉ | 254/520 [15:53<16:04, 3.63s/it] {'loss': 1.1908, 'grad_norm': 0.0008918418518618563, 'learning_rate': 
0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:53<16:04, 3.63s/it] 49%|████▉ | 255/520 [15:57<16:04, 3.64s/it] {'loss': 1.1899, 'grad_norm': 0.0010278750255613364, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [15:57<16:04, 3.64s/it] 49%|████▉ | 256/520 [16:01<16:01, 3.64s/it] {'loss': 1.2438, 'grad_norm': 0.0009891156889722664, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:01<16:01, 3.64s/it] 49%|████▉ | 257/520 [16:04<15:54, 3.63s/it] {'loss': 1.2245, 'grad_norm': 0.0009551301601044089, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:04<15:54, 3.63s/it] 50%|████▉ | 258/520 [16:08<15:53, 3.64s/it] {'loss': 1.2276, 'grad_norm': 0.0009446954752683487, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:08<15:53, 3.64s/it] 50%|████▉ | 259/520 [16:11<15:49, 3.64s/it] {'loss': 1.3087, 'grad_norm': 0.0010593727901775075, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:12<15:49, 3.64s/it] 50%|█████ | 260/520 [16:15<15:46, 3.64s/it] {'loss': 1.236, 'grad_norm': 0.0008340897454428433, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:15<15:46, 3.64s/it] 50%|█████ | 261/520 [16:19<15:42, 3.64s/it] {'loss': 1.1812, 'grad_norm': 0.0009793708465058917, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:19<15:42, 3.64s/it] 50%|█████ | 262/520 [16:22<15:39, 3.64s/it] {'loss': 1.1663, 'grad_norm': 0.0009743043083600781, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:22<15:39, 3.64s/it] 51%|█████ | 263/520 [16:26<15:37, 3.65s/it] {'loss': 1.2054, 'grad_norm': 0.00091594045129314, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:26<15:37, 3.65s/it] 51%|█████ | 264/520 [16:30<15:38, 3.66s/it] {'loss': 1.2553, 'grad_norm': 0.0008888432663365276, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:30<15:38, 3.66s/it] 51%|█████ | 265/520 [16:33<15:35, 3.67s/it] {'loss': 1.1768, 'grad_norm': 0.0010802330506563445, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:33<15:35, 3.67s/it] 51%|█████ | 266/520 [16:37<15:28, 3.66s/it] {'loss': 1.0562, 'grad_norm': 0.0008721696159479578, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:37<15:28, 3.66s/it] 51%|█████▏ | 267/520 [16:41<15:28, 3.67s/it] {'loss': 1.172, 'grad_norm': 0.0009167885184652919, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:41<15:28, 3.67s/it] 52%|█████▏ | 268/520 [16:44<15:24, 3.67s/it] {'loss': 1.3043, 'grad_norm': 0.0011571309386170408, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:44<15:24, 3.67s/it] 52%|█████▏ | 269/520 [16:48<15:22, 3.67s/it] {'loss': 1.2764, 'grad_norm': 0.0009729555676601874, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:48<15:22, 3.67s/it] 52%|█████▏ | 270/520 [16:52<15:15, 3.66s/it] {'loss': 1.1396, 'grad_norm': 0.0009053137183328879, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:52<15:15, 3.66s/it] 52%|█████▏ | 271/520 [16:55<15:10, 3.66s/it] {'loss': 1.2572, 'grad_norm': 0.0009284863349643613, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [16:55<15:10, 3.66s/it] 52%|█████▏ | 272/520 [16:59<15:11, 3.67s/it] {'loss': 1.1555, 'grad_norm': 0.0008919429798449936, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 
52%|█████▏ | 272/520 [16:59<15:11, 3.67s/it] 52%|█████▎ | 273/520 [17:03<15:10, 3.69s/it] {'loss': 1.2588, 'grad_norm': 0.0008793477906426327, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:03<15:10, 3.69s/it] 53%|█████▎ | 274/520 [17:07<15:06, 3.69s/it] {'loss': 1.2406, 'grad_norm': 0.001060624602615578, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:07<15:06, 3.69s/it] 53%|█████▎ | 275/520 [17:10<15:04, 3.69s/it] {'loss': 1.1843, 'grad_norm': 0.0012102729539446597, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:10<15:04, 3.69s/it] 53%|█████▎ | 276/520 [17:14<15:08, 3.72s/it] {'loss': 1.2456, 'grad_norm': 0.0011592374158301345, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:14<15:08, 3.72s/it] 53%|█████▎ | 277/520 [17:18<15:10, 3.75s/it] {'loss': 1.2716, 'grad_norm': 0.000859872835332574, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:18<15:10, 3.75s/it] 53%|█████▎ | 278/520 [17:22<15:05, 3.74s/it] {'loss': 1.138, 'grad_norm': 0.0008460549719875697, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:22<15:05, 3.74s/it] 54%|█████▎ | 279/520 [17:25<14:59, 3.73s/it] {'loss': 1.1447, 'grad_norm': 0.0009404916615396213, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:25<14:59, 3.73s/it] 54%|█████▍ | 280/520 [17:29<15:05, 3.77s/it] {'loss': 1.1788, 'grad_norm': 0.001077325136913165, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:29<15:05, 3.77s/it] 54%|█████▍ | 281/520 [17:33<15:05, 3.79s/it] {'loss': 1.2783, 'grad_norm': 0.0009908474963979865, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:33<15:05, 3.79s/it] 54%|█████▍ | 282/520 [17:37<15:04, 3.80s/it] {'loss': 1.1509, 'grad_norm': 0.0008873896666638306, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:37<15:04, 3.80s/it] 54%|█████▍ | 283/520 [17:41<15:03, 3.81s/it] {'loss': 1.2908, 'grad_norm': 0.0010143556960520328, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:41<15:03, 3.81s/it] 55%|█████▍ | 284/520 [17:44<15:01, 3.82s/it] {'loss': 1.1526, 'grad_norm': 0.0010155175417528367, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:45<15:01, 3.82s/it] 55%|█████▍ | 285/520 [17:48<14:56, 3.81s/it] {'loss': 1.1736, 'grad_norm': 0.000914694834778864, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:48<14:56, 3.81s/it] 55%|█████▌ | 286/520 [17:52<14:54, 3.82s/it] {'loss': 1.0562, 'grad_norm': 0.0009541820482528748, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:52<14:54, 3.82s/it] 55%|█████▌ | 287/520 [17:56<14:50, 3.82s/it] {'loss': 1.2815, 'grad_norm': 0.0009431377185842451, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [17:56<14:50, 3.82s/it] 55%|█████▌ | 288/520 [18:00<14:48, 3.83s/it] {'loss': 1.3095, 'grad_norm': 0.0009041492423889184, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:00<14:48, 3.83s/it] 56%|█████▌ | 289/520 [18:04<14:48, 3.85s/it] {'loss': 1.1871, 'grad_norm': 0.0008900020321449768, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:04<14:48, 3.85s/it] 56%|█████▌ | 290/520 [18:08<14:42, 3.84s/it] {'loss': 1.1165, 'grad_norm': 0.0008740818038316422, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} 
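The logged learning_rate values are consistent with a linear warmup over the first 16 steps (0.1625, 0.175, 0.1875, 0.2) followed by cosine decay of the 2e-1 peak across the remaining 504 steps: step 100 logs 0.1866025... = 0.2 · ½(1 + cos(π · 84/504)), and step 268, the exact midpoint, logs 0.1. A sketch that reproduces the series, assuming Hugging Face's standard cosine-with-warmup schedule:

    import math

    def lr(step, peak=0.2, warmup=16, total=520):
        # Linear warmup to the peak, then half-cosine decay toward zero.
        if step < warmup:
            return peak * step / warmup
        progress = (step - warmup) / (total - warmup)
        return 0.5 * peak * (1.0 + math.cos(math.pi * progress))

    print(lr(100))  # ~0.18660254, matching the value logged at step 100
    print(lr(268))  # 0.1, matching the value logged at step 268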
+ 56%|█████▌ | 290/520 [18:08<14:42, 3.84s/it] 56%|█████▌ | 291/520 [18:11<14:43, 3.86s/it] {'loss': 1.1585, 'grad_norm': 0.0008829608169719335, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:11<14:43, 3.86s/it] 56%|█████▌ | 292/520 [18:15<14:33, 3.83s/it] {'loss': 1.2083, 'grad_norm': 0.0009033754070138402, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:15<14:33, 3.83s/it] 56%|█████▋ | 293/520 [18:19<14:23, 3.80s/it] {'loss': 1.1626, 'grad_norm': 0.0010100876657789196, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:19<14:23, 3.80s/it] 57%|█████▋ | 294/520 [18:23<14:14, 3.78s/it] {'loss': 1.1753, 'grad_norm': 0.0009736306651214892, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:23<14:14, 3.78s/it] 57%|█████▋ | 295/520 [18:27<14:17, 3.81s/it] {'loss': 1.191, 'grad_norm': 0.0009129745787814908, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:27<14:17, 3.81s/it] 57%|█████▋ | 296/520 [18:30<14:19, 3.83s/it] {'loss': 1.1312, 'grad_norm': 0.0010260232213898878, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:30<14:19, 3.83s/it] 57%|█████▋ | 297/520 [18:34<14:17, 3.85s/it] {'loss': 1.2633, 'grad_norm': 0.0010121627465524503, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:34<14:17, 3.85s/it] 57%|█████▋ | 298/520 [18:38<14:08, 3.82s/it] {'loss': 1.222, 'grad_norm': 0.0008796601381283599, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:38<14:08, 3.82s/it] 57%|█████▊ | 299/520 [18:42<13:53, 3.77s/it] {'loss': 1.2288, 'grad_norm': 0.0008586736583326972, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:42<13:53, 3.77s/it] 58%|█████▊ | 300/520 [18:45<13:38, 3.72s/it] {'loss': 1.2704, 'grad_norm': 0.0009233107997843487, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:45<13:38, 3.72s/it] 58%|█████▊ | 301/520 [18:49<13:29, 3.70s/it] {'loss': 1.2507, 'grad_norm': 0.0009231605881429182, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:49<13:29, 3.70s/it] 58%|█████▊ | 302/520 [18:53<13:22, 3.68s/it] {'loss': 1.2421, 'grad_norm': 0.0009443380994684135, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:53<13:22, 3.68s/it] 58%|█████▊ | 303/520 [18:56<13:16, 3.67s/it] {'loss': 1.1752, 'grad_norm': 0.0010345303503717046, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [18:56<13:16, 3.67s/it] 58%|█████▊ | 304/520 [19:00<13:23, 3.72s/it] {'loss': 1.1508, 'grad_norm': 0.0009827130412065207, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:00<13:23, 3.72s/it] 59%|█████▊ | 305/520 [19:04<13:23, 3.74s/it] {'loss': 1.2802, 'grad_norm': 0.0010639894717790908, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:04<13:23, 3.74s/it] 59%|█████▉ | 306/520 [19:08<13:21, 3.75s/it] {'loss': 1.2275, 'grad_norm': 0.0009526823264601074, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:08<13:21, 3.75s/it] 59%|█████▉ | 307/520 [19:11<13:21, 3.76s/it] {'loss': 1.1663, 'grad_norm': 0.0009469263263700541, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:11<13:21, 3.76s/it] 59%|█████▉ | 308/520 [19:15<13:18, 3.77s/it] {'loss': 1.279, 'grad_norm': 0.0009294222577026293, 'learning_rate': 0.07532426023097064, 'epoch': 
0.59} + 59%|█████▉ | 308/520 [19:15<13:18, 3.77s/it] 59%|█████▉ | 309/520 [19:19<13:40, 3.89s/it] {'loss': 1.1694, 'grad_norm': 0.0009077160949790499, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:19<13:40, 3.89s/it] 60%|█████▉ | 310/520 [19:23<13:29, 3.85s/it] {'loss': 1.1498, 'grad_norm': 0.0009357631195692689, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:23<13:29, 3.85s/it] 60%|█████▉ | 311/520 [19:27<13:19, 3.83s/it] {'loss': 1.1256, 'grad_norm': 0.0009478261188936671, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:27<13:19, 3.83s/it] 60%|██████ | 312/520 [19:31<13:12, 3.81s/it] {'loss': 1.1163, 'grad_norm': 0.0009559892437909154, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:31<13:12, 3.81s/it] 60%|██████ | 313/520 [19:34<13:07, 3.81s/it] {'loss': 1.1056, 'grad_norm': 0.0008612678430503196, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:34<13:07, 3.81s/it] 60%|██████ | 314/520 [19:39<13:29, 3.93s/it] {'loss': 1.139, 'grad_norm': 0.0008899234233945604, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:39<13:29, 3.93s/it] 61%|██████ | 315/520 [19:42<13:14, 3.88s/it] {'loss': 1.193, 'grad_norm': 0.0010823631467225708, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:42<13:14, 3.88s/it] 61%|██████ | 316/520 [19:47<13:34, 3.99s/it] {'loss': 1.1219, 'grad_norm': 0.0009612694243930296, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:47<13:34, 3.99s/it] 61%|██████ | 317/520 [19:50<13:16, 3.92s/it] {'loss': 1.1318, 'grad_norm': 0.0008333876944019995, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:50<13:16, 3.92s/it] 61%|██████ | 318/520 [19:54<13:02, 3.88s/it] {'loss': 1.2408, 'grad_norm': 0.000994213267142327, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:54<13:02, 3.88s/it] 61%|██████▏ | 319/520 [19:58<13:11, 3.94s/it] {'loss': 1.1237, 'grad_norm': 0.0008621421204761383, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [19:58<13:11, 3.94s/it] 62%|██████▏ | 320/520 [20:02<12:58, 3.89s/it] {'loss': 1.07, 'grad_norm': 0.0009316863739767342, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:02<12:58, 3.89s/it] 62%|██████▏ | 321/520 [20:06<12:47, 3.86s/it] {'loss': 1.2652, 'grad_norm': 0.0009355962333867161, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:06<12:47, 3.86s/it] 62%|██████▏ | 322/520 [20:10<12:37, 3.82s/it] {'loss': 1.0968, 'grad_norm': 0.0008804447331886044, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:10<12:37, 3.82s/it] 62%|██████▏ | 323/520 [20:13<12:24, 3.78s/it] {'loss': 1.165, 'grad_norm': 0.000917496855031747, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:13<12:24, 3.78s/it] 62%|██████▏ | 324/520 [20:17<12:13, 3.74s/it] {'loss': 1.2034, 'grad_norm': 0.0009271696408304482, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:17<12:13, 3.74s/it] 62%|██████▎ | 325/520 [20:21<12:05, 3.72s/it] {'loss': 1.2035, 'grad_norm': 0.0009650944381049097, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:21<12:05, 3.72s/it] 63%|██████▎ | 326/520 [20:24<12:01, 3.72s/it] {'loss': 1.2019, 'grad_norm': 0.0009459431466233087, 'learning_rate': 
0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:24<12:01, 3.72s/it] 63%|██████▎ | 327/520 [20:28<11:54, 3.70s/it] {'loss': 1.2028, 'grad_norm': 0.0009452568876713524, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:28<11:54, 3.70s/it] 63%|██████▎ | 328/520 [20:32<11:47, 3.68s/it] {'loss': 1.2459, 'grad_norm': 0.0009774457904456796, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:32<11:47, 3.68s/it] 63%|██████▎ | 329/520 [20:35<11:41, 3.67s/it] {'loss': 1.1271, 'grad_norm': 0.0008120809398988097, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:35<11:41, 3.67s/it] 63%|██████▎ | 330/520 [20:39<11:37, 3.67s/it] {'loss': 1.1983, 'grad_norm': 0.0008536497121836673, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:39<11:37, 3.67s/it] 64%|██████▎ | 331/520 [20:43<11:31, 3.66s/it] {'loss': 1.161, 'grad_norm': 0.0009181894561641774, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:43<11:31, 3.66s/it] 64%|██████▍ | 332/520 [20:46<11:28, 3.66s/it] {'loss': 1.2262, 'grad_norm': 0.0008470536679895841, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:46<11:28, 3.66s/it] 64%|██████▍ | 333/520 [20:50<11:24, 3.66s/it] {'loss': 1.2942, 'grad_norm': 0.000983978133759476, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:50<11:24, 3.66s/it] 64%|██████▍ | 334/520 [20:54<11:20, 3.66s/it] {'loss': 1.2074, 'grad_norm': 0.0009554297948816411, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:54<11:20, 3.66s/it] 64%|██████▍ | 335/520 [20:57<11:16, 3.65s/it] {'loss': 1.2066, 'grad_norm': 0.0008654483281214211, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [20:57<11:16, 3.65s/it] 65%|██████▍ | 336/520 [21:01<11:13, 3.66s/it] {'loss': 1.1077, 'grad_norm': 0.0009940530836701734, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:01<11:13, 3.66s/it] 65%|██████▍ | 337/520 [21:05<11:11, 3.67s/it] {'loss': 1.0988, 'grad_norm': 0.0008928456020336111, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:05<11:11, 3.67s/it] 65%|██████▌ | 338/520 [21:08<11:08, 3.67s/it] {'loss': 1.2072, 'grad_norm': 0.0008979617571014503, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:08<11:08, 3.67s/it] 65%|██████▌ | 339/520 [21:12<11:04, 3.67s/it] {'loss': 1.155, 'grad_norm': 0.0009125870038490753, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:12<11:04, 3.67s/it] 65%|██████▌ | 340/520 [21:16<11:01, 3.67s/it] {'loss': 1.1472, 'grad_norm': 0.0009441980712104716, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:16<11:01, 3.67s/it] 66%|██████▌ | 341/520 [21:19<10:56, 3.67s/it] {'loss': 1.1712, 'grad_norm': 0.0009518542669632904, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:19<10:56, 3.67s/it] 66%|██████▌ | 342/520 [21:23<10:52, 3.66s/it] {'loss': 1.2003, 'grad_norm': 0.0011328950987515745, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:23<10:52, 3.66s/it] 66%|██████▌ | 343/520 [21:27<10:50, 3.67s/it] {'loss': 1.1532, 'grad_norm': 0.0008529922950508962, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:27<10:50, 3.67s/it] 66%|██████▌ | 344/520 [21:30<10:47, 3.68s/it] {'loss': 1.1268, 
'grad_norm': 0.0008704458715580204, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:30<10:47, 3.68s/it] 66%|██████▋ | 345/520 [21:34<10:43, 3.68s/it] {'loss': 1.2314, 'grad_norm': 0.0009540554642389026, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:34<10:43, 3.68s/it] 67%|██████▋ | 346/520 [21:38<10:46, 3.71s/it] {'loss': 1.1732, 'grad_norm': 0.000925544483759958, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:38<10:46, 3.71s/it] 67%|██████▋ | 347/520 [21:42<10:48, 3.75s/it] {'loss': 1.1404, 'grad_norm': 0.0008469568787716931, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:42<10:48, 3.75s/it]
Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
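The warning directly above is from the Hugging Face tokenizer: one sample encodes to 2,778 tokens against the 2,048-token tokenizer_model_max_length in this run's config, which is only safe if something downstream truncates before the forward pass. A minimal sketch of clamping at tokenization time with the stock tokenizer options (the text value is a stand-in, not the offending sample):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048)
    text = "<a conversation long enough to exceed 2048 tokens>"  # placeholder
    # truncation=True caps the encoding at max_length, so no position
    # index can exceed the model's maximum sequence length.
    ids = tok(text, truncation=True, max_length=tok.model_max_length).input_ids
    assert len(ids) <= tok.model_max_length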
[22:36<10:27, 3.95s/it] {'loss': 1.1961, 'grad_norm': 0.0008484992280495601, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:36<10:27, 3.95s/it] 70%|██████▉ | 362/520 [22:40<10:19, 3.92s/it] {'loss': 1.1691, 'grad_norm': 0.000979722820279452, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:40<10:19, 3.92s/it] 70%|██████▉ | 363/520 [22:44<10:11, 3.90s/it] {'loss': 1.1928, 'grad_norm': 0.000916065385535619, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:44<10:11, 3.90s/it] 70%|███████ | 364/520 [22:47<10:05, 3.88s/it] {'loss': 1.2179, 'grad_norm': 0.0009785764100479221, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:47<10:05, 3.88s/it] 70%|███████ | 365/520 [22:51<10:01, 3.88s/it] {'loss': 1.2466, 'grad_norm': 0.0009365178618778276, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:51<10:01, 3.88s/it] 70%|███████ | 366/520 [22:55<09:56, 3.88s/it] {'loss': 1.2057, 'grad_norm': 0.0008758865702734137, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [22:55<09:56, 3.88s/it] 71%|███████ | 367/520 [22:59<09:53, 3.88s/it] {'loss': 1.2083, 'grad_norm': 0.0009232461945615021, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [22:59<09:53, 3.88s/it] 71%|███████ | 368/520 [23:03<09:50, 3.88s/it] {'loss': 1.0588, 'grad_norm': 0.0009314677075417153, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:03<09:50, 3.88s/it] 71%|███████ | 369/520 [23:07<09:46, 3.89s/it] {'loss': 1.1729, 'grad_norm': 0.0008160941336705683, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:07<09:46, 3.89s/it] 71%|███████ | 370/520 [23:11<09:41, 3.88s/it] {'loss': 1.1225, 'grad_norm': 0.0008871880872781679, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:11<09:41, 3.88s/it] 71%|███████▏ | 371/520 [23:15<09:37, 3.87s/it] {'loss': 1.1218, 'grad_norm': 0.0009483168747019745, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:15<09:37, 3.87s/it] 72%|███████▏ | 372/520 [23:18<09:26, 3.83s/it] {'loss': 1.2444, 'grad_norm': 0.0008703875457464553, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:18<09:26, 3.83s/it] 72%|███████▏ | 373/520 [23:22<09:16, 3.79s/it] {'loss': 1.1334, 'grad_norm': 0.0009774564671242356, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:22<09:16, 3.79s/it] 72%|███████▏ | 374/520 [23:26<09:07, 3.75s/it] {'loss': 1.207, 'grad_norm': 0.0009065052646987839, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:26<09:07, 3.75s/it] 72%|███████▏ | 375/520 [23:29<09:00, 3.73s/it] {'loss': 1.1257, 'grad_norm': 0.0008892952669548343, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:29<09:00, 3.73s/it] 72%|███████▏ | 376/520 [23:33<08:54, 3.71s/it] {'loss': 1.2328, 'grad_norm': 0.0008710463369664328, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:33<08:54, 3.71s/it] 72%|███████▎ | 377/520 [23:37<08:47, 3.69s/it] {'loss': 1.1636, 'grad_norm': 0.0009408765872865865, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:37<08:47, 3.69s/it] 73%|███████▎ | 378/520 [23:40<08:44, 3.69s/it] {'loss': 1.228, 'grad_norm': 0.0008617165820741414, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 
73%|███████▎ | 378/520 [23:40<08:44, 3.69s/it] 73%|███████▎ | 379/520 [23:44<08:39, 3.69s/it] {'loss': 1.2032, 'grad_norm': 0.0008739203182642984, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:44<08:39, 3.69s/it] 73%|███████▎ | 380/520 [23:48<08:35, 3.68s/it] {'loss': 1.22, 'grad_norm': 0.0009001255279080219, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:48<08:35, 3.68s/it] 73%|███████▎ | 381/520 [23:51<08:31, 3.68s/it] {'loss': 1.207, 'grad_norm': 0.0008861780436908363, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:51<08:31, 3.68s/it] 73%|███████▎ | 382/520 [23:55<08:27, 3.68s/it] {'loss': 1.1855, 'grad_norm': 0.0008699353437007816, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:55<08:27, 3.68s/it] 74%|███████▎ | 383/520 [23:59<08:23, 3.68s/it] {'loss': 1.0445, 'grad_norm': 0.0009987689069919752, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:59<08:23, 3.68s/it] 74%|███████▍ | 384/520 [24:02<08:19, 3.67s/it] {'loss': 1.2229, 'grad_norm': 0.000810038285214752, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:02<08:19, 3.67s/it] 74%|███████▍ | 385/520 [24:06<08:15, 3.67s/it] {'loss': 1.1838, 'grad_norm': 0.0008325886650228976, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:06<08:15, 3.67s/it] 74%|███████▍ | 386/520 [24:10<08:13, 3.68s/it] {'loss': 1.1392, 'grad_norm': 0.0007895727328080857, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:10<08:13, 3.68s/it] 74%|███████▍ | 387/520 [24:13<08:09, 3.68s/it] {'loss': 1.2451, 'grad_norm': 0.0009411952380573224, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:13<08:09, 3.68s/it] 75%|███████▍ | 388/520 [24:17<08:05, 3.68s/it] {'loss': 1.0944, 'grad_norm': 0.0008598757065916455, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:17<08:05, 3.68s/it] 75%|███████▍ | 389/520 [24:21<08:01, 3.68s/it] {'loss': 1.1408, 'grad_norm': 0.001014021588475843, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:21<08:01, 3.68s/it] 75%|███████▌ | 390/520 [24:24<07:57, 3.68s/it] {'loss': 1.2049, 'grad_norm': 0.000891197812334963, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:24<07:57, 3.68s/it] 75%|███████▌ | 391/520 [24:28<07:53, 3.67s/it] {'loss': 1.277, 'grad_norm': 0.0009478810580470046, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:28<07:53, 3.67s/it] 75%|███████▌ | 392/520 [24:32<07:49, 3.67s/it] {'loss': 1.0977, 'grad_norm': 0.0009089693804416159, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:32<07:49, 3.67s/it] 76%|███████▌ | 393/520 [24:35<07:44, 3.66s/it] {'loss': 1.0936, 'grad_norm': 0.0007659141100505869, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:35<07:44, 3.66s/it] 76%|███████▌ | 394/520 [24:39<07:40, 3.65s/it] {'loss': 1.1643, 'grad_norm': 0.0009417870082200383, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:39<07:40, 3.65s/it] 76%|███████▌ | 395/520 [24:43<07:35, 3.65s/it] {'loss': 1.1292, 'grad_norm': 0.000970719577746596, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:43<07:35, 3.65s/it] 76%|███████▌ | 396/520 [24:46<07:31, 3.64s/it] {'loss': 1.2121, 
'grad_norm': 0.0010150007148710645, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:46<07:31, 3.64s/it] 76%|███████▋ | 397/520 [24:50<07:30, 3.66s/it] {'loss': 1.1841, 'grad_norm': 0.0008756529891740628, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:50<07:30, 3.66s/it] 77%|███████▋ | 398/520 [24:54<07:27, 3.66s/it] {'loss': 1.1833, 'grad_norm': 0.0009388433604028527, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:54<07:27, 3.66s/it] 77%|███████▋ | 399/520 [24:57<07:23, 3.67s/it] {'loss': 1.1314, 'grad_norm': 0.0008369913401370635, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [24:57<07:23, 3.67s/it] 77%|███████▋ | 400/520 [25:01<07:19, 3.67s/it] {'loss': 1.1642, 'grad_norm': 0.0008025299505220457, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:01<07:19, 3.67s/it] 77%|███████▋ | 401/520 [25:05<07:15, 3.66s/it] {'loss': 1.0188, 'grad_norm': 0.0009428742724227529, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:05<07:15, 3.66s/it] 77%|███████▋ | 402/520 [25:08<07:11, 3.66s/it] {'loss': 1.1479, 'grad_norm': 0.0009397285946570596, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:08<07:11, 3.66s/it] 78%|███████▊ | 403/520 [25:12<07:08, 3.66s/it] {'loss': 1.1693, 'grad_norm': 0.0009769016321124477, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:12<07:08, 3.66s/it] 78%|███████▊ | 404/520 [25:16<07:04, 3.66s/it] {'loss': 1.0812, 'grad_norm': 0.0010284211170459175, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:16<07:04, 3.66s/it] 78%|███████▊ | 405/520 [25:19<07:00, 3.66s/it] {'loss': 1.1453, 'grad_norm': 0.000925068908881478, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:19<07:00, 3.66s/it] 78%|███████▊ | 406/520 [25:23<06:59, 3.68s/it] {'loss': 1.0612, 'grad_norm': 0.0010963268091157474, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:23<06:59, 3.68s/it] 78%|███████▊ | 407/520 [25:27<07:02, 3.74s/it] {'loss': 1.247, 'grad_norm': 0.000911169379874927, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:27<07:02, 3.74s/it] 78%|███████▊ | 408/520 [25:31<07:03, 3.79s/it] {'loss': 1.1594, 'grad_norm': 0.0010367919408543291, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:31<07:03, 3.79s/it] 79%|███████▊ | 409/520 [25:35<07:03, 3.82s/it] {'loss': 1.2745, 'grad_norm': 0.0009848674484645922, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:35<07:03, 3.82s/it] 79%|███████▉ | 410/520 [25:39<07:02, 3.84s/it] {'loss': 1.0155, 'grad_norm': 0.0009128569988345864, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:39<07:02, 3.84s/it] 79%|███████▉ | 411/520 [25:42<07:00, 3.86s/it] {'loss': 1.2582, 'grad_norm': 0.0009832862871181293, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:42<07:00, 3.86s/it] 79%|███████▉ | 412/520 [25:46<06:58, 3.87s/it] {'loss': 1.1655, 'grad_norm': 0.0009098749507861383, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:46<06:58, 3.87s/it] 79%|███████▉ | 413/520 [25:50<06:55, 3.88s/it] {'loss': 1.1536, 'grad_norm': 0.0008537356441456321, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 
[25:50<06:55, 3.88s/it] 80%|███████▉ | 414/520 [25:54<06:52, 3.89s/it] {'loss': 0.9697, 'grad_norm': 0.0007535062326091655, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:54<06:52, 3.89s/it] 80%|███████▉ | 415/520 [25:58<06:42, 3.83s/it] {'loss': 1.1471, 'grad_norm': 0.000863484283033768, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [25:58<06:42, 3.83s/it] 80%|████████ | 416/520 [26:02<06:33, 3.79s/it] {'loss': 1.0608, 'grad_norm': 0.0009918488112273438, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:02<06:33, 3.79s/it] 80%|████████ | 417/520 [26:05<06:26, 3.75s/it] {'loss': 1.2206, 'grad_norm': 0.0009431676841342506, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:05<06:26, 3.75s/it] 80%|████████ | 418/520 [26:09<06:20, 3.73s/it] {'loss': 1.2092, 'grad_norm': 0.0008710922941258671, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:09<06:20, 3.73s/it] 81%|████████ | 419/520 [26:13<06:13, 3.70s/it] {'loss': 1.2027, 'grad_norm': 0.000984747961546533, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:13<06:13, 3.70s/it] 81%|████████ | 420/520 [26:16<06:09, 3.69s/it] {'loss': 1.0983, 'grad_norm': 0.000967644386797145, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:16<06:09, 3.69s/it] 81%|████████ | 421/520 [26:20<06:04, 3.69s/it] {'loss': 1.0331, 'grad_norm': 0.0010840214690852625, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:20<06:04, 3.69s/it] 81%|████████ | 422/520 [26:24<05:59, 3.67s/it] {'loss': 1.1511, 'grad_norm': 0.0009421395777408991, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:24<05:59, 3.67s/it] 81%|████████▏ | 423/520 [26:27<05:55, 3.67s/it] {'loss': 1.1254, 'grad_norm': 0.0009884223802000209, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:27<05:55, 3.67s/it] 82%|████████▏ | 424/520 [26:31<05:52, 3.68s/it] {'loss': 1.2413, 'grad_norm': 0.0008683548391255801, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:31<05:52, 3.68s/it] 82%|████████▏ | 425/520 [26:35<05:48, 3.67s/it] {'loss': 1.1426, 'grad_norm': 0.000910296794995291, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:35<05:48, 3.67s/it] 82%|████████▏ | 426/520 [26:38<05:44, 3.67s/it] {'loss': 1.1649, 'grad_norm': 0.0011987111282261678, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:38<05:44, 3.67s/it] 82%|████████▏ | 427/520 [26:42<05:40, 3.66s/it] {'loss': 1.0765, 'grad_norm': 0.0008834384629542686, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:42<05:40, 3.66s/it] 82%|████████▏ | 428/520 [26:45<05:36, 3.66s/it] {'loss': 1.0619, 'grad_norm': 0.0009534313011588595, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:45<05:36, 3.66s/it] 82%|████████▎ | 429/520 [26:49<05:34, 3.67s/it] {'loss': 1.1557, 'grad_norm': 0.0009493564165241312, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:49<05:34, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [26:53<05:36, 3.74s/it] {'loss': 1.1558, 'grad_norm': 0.0008456364333848383, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [26:53<05:36, 3.74s/it] 83%|████████▎ | 431/520 [26:57<05:33, 3.75s/it] {'loss': 1.1292, 'grad_norm': 0.0009288710913719458, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [26:57<05:33, 3.75s/it] 83%|████████▎ | 432/520 [27:01<05:28, 3.74s/it] {'loss': 1.0703, 'grad_norm': 0.0009356692741912796, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:01<05:28, 3.74s/it] 83%|████████▎ | 433/520 [27:04<05:22, 3.71s/it] {'loss': 1.1988, 'grad_norm': 0.0008930442678901758, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:04<05:22, 3.71s/it] 83%|████████▎ | 434/520 [27:08<05:18, 3.70s/it] {'loss': 0.9476, 'grad_norm': 0.0009056671973909961, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:08<05:18, 3.70s/it] 84%|████████▎ | 435/520 [27:12<05:12, 3.68s/it] {'loss': 1.2324, 'grad_norm': 0.0009846464593279882, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:12<05:12, 3.68s/it] 84%|████████▍ | 436/520 [27:15<05:08, 3.67s/it] {'loss': 1.0393, 'grad_norm': 0.0009437217681350045, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:15<05:08, 3.67s/it] 84%|████████▍ | 437/520 [27:19<05:05, 3.68s/it] {'loss': 1.2541, 'grad_norm': 0.0009253866866558218, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:19<05:05, 3.68s/it] 84%|████████▍ | 438/520 [27:23<05:01, 3.67s/it] {'loss': 1.0772, 'grad_norm': 0.0009195585958292273, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:23<05:01, 3.67s/it] 84%|████████▍ | 439/520 [27:26<04:56, 3.66s/it] {'loss': 1.1146, 'grad_norm': 0.0007679279142841614, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:26<04:56, 3.66s/it] 85%|████████▍ | 440/520 [27:30<04:52, 3.66s/it] {'loss': 1.1081, 'grad_norm': 0.0009140048317029389, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:30<04:52, 3.66s/it] 85%|████████▍ | 441/520 [27:34<04:49, 3.67s/it] {'loss': 1.1247, 'grad_norm': 0.0008603354783174698, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:34<04:49, 3.67s/it] 85%|████████▌ | 442/520 [27:37<04:46, 3.67s/it] {'loss': 1.1745, 'grad_norm': 0.0009776273152350703, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:37<04:46, 3.67s/it] 85%|████████▌ | 443/520 [27:41<04:46, 3.72s/it] {'loss': 1.1875, 'grad_norm': 0.0008859557021278977, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:41<04:46, 3.72s/it] 85%|████████▌ | 444/520 [27:45<04:47, 3.78s/it] {'loss': 1.1491, 'grad_norm': 0.0008207769054072034, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:45<04:47, 3.78s/it] 86%|████████▌ | 445/520 [27:49<04:46, 3.82s/it] {'loss': 1.0791, 'grad_norm': 0.0008798371218841223, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:49<04:46, 3.82s/it] 86%|████████▌ | 446/520 [27:53<04:45, 3.85s/it] {'loss': 1.2018, 'grad_norm': 0.0008191397831966304, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:53<04:45, 3.85s/it] 
86%|████████▌ | 447/520 [27:57<04:42, 3.87s/it] {'loss': 1.1548, 'grad_norm': 0.000876443794084748, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [27:57<04:42, 3.87s/it] 86%|████████▌ | 448/520 [28:01<04:38, 3.87s/it] {'loss': 1.1458, 'grad_norm': 0.0009334839247301687, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:01<04:38, 3.87s/it] 86%|████████▋ | 449/520 [28:04<04:35, 3.88s/it] {'loss': 1.1584, 'grad_norm': 0.0008854947253419567, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:04<04:35, 3.88s/it] 87%|████████▋ | 450/520 [28:08<04:31, 3.89s/it] {'loss': 1.175, 'grad_norm': 0.0009041164425244169, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:08<04:31, 3.89s/it] 87%|████████▋ | 451/520 [28:12<04:29, 3.90s/it] {'loss': 1.1759, 'grad_norm': 0.0009312817259634098, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:12<04:29, 3.90s/it] 87%|████████▋ | 452/520 [28:16<04:25, 3.91s/it] {'loss': 1.2045, 'grad_norm': 0.0008464639830680301, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:16<04:25, 3.91s/it] 87%|████████▋ | 453/520 [28:20<04:22, 3.92s/it] {'loss': 1.1808, 'grad_norm': 0.0008762884685017993, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:20<04:22, 3.92s/it] 87%|████████▋ | 454/520 [28:24<04:19, 3.93s/it] {'loss': 1.0864, 'grad_norm': 0.0009298999216867702, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:24<04:19, 3.93s/it] 88%|████████▊ | 455/520 [28:28<04:14, 3.92s/it] {'loss': 1.2275, 'grad_norm': 0.0008928731363826971, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:28<04:14, 3.92s/it] 88%|████████▊ | 456/520 [28:32<04:10, 3.92s/it] {'loss': 1.1545, 'grad_norm': 0.0009094781978237284, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:32<04:10, 3.92s/it] 88%|████████▊ | 457/520 [28:36<04:06, 3.91s/it] {'loss': 1.0811, 'grad_norm': 0.0007839137017246374, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:36<04:06, 3.91s/it] 88%|████████▊ | 458/520 [28:40<04:02, 3.91s/it] {'loss': 1.2784, 'grad_norm': 0.000983791909683877, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:40<04:02, 3.91s/it] 88%|████████▊ | 459/520 [28:44<03:58, 3.91s/it] {'loss': 1.2095, 'grad_norm': 0.0009481965224472752, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:44<03:58, 3.91s/it] 88%|████████▊ | 460/520 [28:48<03:54, 3.91s/it] {'loss': 1.0979, 'grad_norm': 0.000874546510131672, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:48<03:54, 3.91s/it] 89%|████████▊ | 461/520 [28:52<03:51, 3.93s/it] {'loss': 1.1626, 'grad_norm': 0.0006981649972509126, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:52<03:51, 3.93s/it] 89%|████████▉ | 462/520 [28:55<03:47, 3.92s/it] {'loss': 1.2502, 'grad_norm': 0.0008698707166466538, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [28:55<03:47, 3.92s/it] 89%|████████▉ | 463/520 [28:59<03:39, 3.85s/it] {'loss': 1.0595, 'grad_norm': 0.0009366522644317053, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [28:59<03:39, 3.85s/it] 89%|████████▉ | 464/520 [29:03<03:32, 3.79s/it] {'loss': 1.1873, 
'grad_norm': 0.0009526557900018462, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:03<03:32, 3.79s/it] 89%|████████▉ | 465/520 [29:06<03:26, 3.75s/it] {'loss': 1.2912, 'grad_norm': 0.0009382821255274203, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:06<03:26, 3.75s/it] 90%|████████▉ | 466/520 [29:10<03:20, 3.72s/it] {'loss': 1.183, 'grad_norm': 0.0008422592423561682, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:10<03:20, 3.72s/it] 90%|████████▉ | 467/520 [29:14<03:17, 3.72s/it] {'loss': 1.1383, 'grad_norm': 0.0008190680291870156, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:14<03:17, 3.72s/it] 90%|█████████ | 468/520 [29:17<03:12, 3.70s/it] {'loss': 1.1571, 'grad_norm': 0.0010201970161825011, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:17<03:12, 3.70s/it] 90%|█████████ | 469/520 [29:21<03:08, 3.70s/it] {'loss': 1.2219, 'grad_norm': 0.0010076988434648646, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:21<03:08, 3.70s/it] 90%|█████████ | 470/520 [29:25<03:04, 3.68s/it] {'loss': 1.0982, 'grad_norm': 0.0008269237643517331, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:25<03:04, 3.68s/it] 91%|█████████ | 471/520 [29:28<02:59, 3.67s/it] {'loss': 1.1202, 'grad_norm': 0.0009421186878823545, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:28<02:59, 3.67s/it] 91%|█████████ | 472/520 [29:32<02:56, 3.67s/it] {'loss': 1.0914, 'grad_norm': 0.0009080046766115436, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:32<02:56, 3.67s/it] 91%|█████████ | 473/520 [29:36<02:52, 3.67s/it] {'loss': 1.1539, 'grad_norm': 0.0009229093453851626, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:36<02:52, 3.67s/it] 91%|█████████ | 474/520 [29:39<02:48, 3.67s/it] {'loss': 1.1765, 'grad_norm': 0.0008743014586366793, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:39<02:48, 3.67s/it] 91%|█████████▏| 475/520 [29:43<02:44, 3.66s/it] {'loss': 1.0941, 'grad_norm': 0.0008530683953036131, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:43<02:44, 3.66s/it] 92%|█████████▏| 476/520 [29:47<02:40, 3.66s/it] {'loss': 1.1453, 'grad_norm': 0.0009307057402843398, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:47<02:40, 3.66s/it] 92%|█████████▏| 477/520 [29:50<02:37, 3.65s/it] {'loss': 1.1362, 'grad_norm': 0.0009916098706633995, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:50<02:37, 3.65s/it] 92%|█████████▏| 478/520 [29:54<02:33, 3.65s/it] {'loss': 1.087, 'grad_norm': 0.0009176755335353913, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:54<02:33, 3.65s/it] 92%|█████████▏| 479/520 [29:58<02:29, 3.65s/it] {'loss': 1.1413, 'grad_norm': 0.0009287566879826701, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [29:58<02:29, 3.65s/it] 92%|█████████▏| 480/520 [30:01<02:26, 3.66s/it] {'loss': 1.1597, 'grad_norm': 0.0008338922602327971, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:01<02:26, 3.66s/it] 92%|█████████▎| 481/520 [30:05<02:22, 3.67s/it] {'loss': 1.1514, 'grad_norm': 0.0008469812237799557, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:05<02:22, 3.67s/it] 93%|█████████▎| 482/520 [30:09<02:19, 3.66s/it] {'loss': 1.1695, 'grad_norm': 0.0008624017369730534, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:09<02:19, 3.66s/it] 93%|█████████▎| 483/520 [30:12<02:15, 3.66s/it] {'loss': 1.1541, 'grad_norm': 0.000962836745038325, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:12<02:15, 3.66s/it] 93%|█████████▎| 484/520 [30:16<02:12, 3.68s/it] {'loss': 1.1602, 'grad_norm': 0.0009396850932398783, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:16<02:12, 3.68s/it] 93%|█████████▎| 485/520 [30:20<02:08, 3.68s/it] {'loss': 1.1148, 'grad_norm': 0.0008651903121336163, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:20<02:08, 3.68s/it] 93%|█████████▎| 486/520 [30:23<02:04, 3.66s/it] {'loss': 1.2357, 'grad_norm': 0.0009501699302375091, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:23<02:04, 3.66s/it] 94%|█████████▎| 487/520 [30:27<02:00, 3.66s/it] {'loss': 1.0911, 'grad_norm': 0.0010032633158514243, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:27<02:00, 3.66s/it] 94%|█████████▍| 488/520 [30:31<01:57, 3.66s/it] {'loss': 1.0377, 'grad_norm': 0.000919597022551705, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:31<01:57, 3.66s/it] 94%|█████████▍| 489/520 [30:34<01:52, 3.64s/it] {'loss': 1.1726, 'grad_norm': 0.000769951437991283, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:34<01:52, 3.64s/it] 94%|█████████▍| 490/520 [30:38<01:49, 3.64s/it] {'loss': 1.1559, 'grad_norm': 0.0009141188037989056, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:38<01:49, 3.64s/it] 94%|█████████▍| 491/520 [30:42<01:45, 3.64s/it] {'loss': 1.1249, 'grad_norm': 0.0009637519429261428, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:42<01:45, 3.64s/it] 95%|█████████▍| 492/520 [30:45<01:41, 3.64s/it] {'loss': 1.2321, 'grad_norm': 0.0009563339136901039, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:45<01:41, 3.64s/it] 95%|█████████▍| 493/520 [30:49<01:38, 3.65s/it] {'loss': 1.1678, 'grad_norm': 0.0008890527574186352, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:49<01:38, 3.65s/it] 95%|█████████▌| 494/520 [30:53<01:34, 3.64s/it] {'loss': 1.1698, 'grad_norm': 0.0008473995884294256, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:53<01:34, 3.64s/it] 95%|█████████▌| 495/520 [30:56<01:30, 3.64s/it] {'loss': 1.1421, 'grad_norm': 0.0009623577713799923, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [30:56<01:30, 3.64s/it] 95%|█████████▌| 496/520 [31:00<01:27, 3.65s/it] {'loss': 1.0624, 'grad_norm': 0.0009177217837377816, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:00<01:27, 3.65s/it] 96%|█████████▌| 497/520 [31:03<01:23, 3.64s/it] {'loss': 1.101, 'grad_norm': 0.0007836163536828402, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:03<01:23, 3.64s/it] 96%|█████████▌| 498/520 [31:07<01:20, 3.64s/it] {'loss': 1.1366, 'grad_norm': 0.0008942513984891202, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:07<01:20, 
3.64s/it] 96%|█████████▌| 499/520 [31:11<01:16, 3.65s/it] {'loss': 1.2367, 'grad_norm': 0.0009549624177252621, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:11<01:16, 3.65s/it] 96%|█████████▌| 500/520 [31:14<01:13, 3.65s/it] {'loss': 1.2537, 'grad_norm': 0.0010520263955393955, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:14<01:13, 3.65s/it] 96%|█████████▋| 501/520 [31:18<01:09, 3.66s/it] {'loss': 1.1454, 'grad_norm': 0.0009709929754424683, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:18<01:09, 3.66s/it] 97%|█████████▋| 502/520 [31:22<01:05, 3.66s/it] {'loss': 1.1716, 'grad_norm': 0.0008584433042480357, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:22<01:05, 3.66s/it] 97%|█████████▋| 503/520 [31:25<01:02, 3.65s/it] {'loss': 1.1347, 'grad_norm': 0.0009353720133525483, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:25<01:02, 3.65s/it] 97%|█████████▋| 504/520 [31:29<00:58, 3.68s/it] {'loss': 1.1642, 'grad_norm': 0.001066823069196862, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:29<00:58, 3.68s/it] 97%|█████████▋| 505/520 [31:33<00:55, 3.67s/it] {'loss': 1.1947, 'grad_norm': 0.000963737504768382, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:33<00:55, 3.67s/it] 97%|█████████▋| 506/520 [31:36<00:51, 3.66s/it] {'loss': 1.129, 'grad_norm': 0.0009270407299440107, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:36<00:51, 3.66s/it] 98%|█████████▊| 507/520 [31:40<00:47, 3.67s/it] {'loss': 1.2769, 'grad_norm': 0.0008258902524287385, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:40<00:47, 3.67s/it] 98%|█████████▊| 508/520 [31:44<00:43, 3.65s/it] {'loss': 1.2421, 'grad_norm': 0.0009386593974306428, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:44<00:43, 3.65s/it] 98%|█████████▊| 509/520 [31:47<00:40, 3.65s/it] {'loss': 1.2175, 'grad_norm': 0.0008800906512316449, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:47<00:40, 3.65s/it] 98%|█████████▊| 510/520 [31:51<00:36, 3.66s/it] {'loss': 1.164, 'grad_norm': 0.000904532223942608, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:51<00:36, 3.66s/it] 98%|█████████▊| 511/520 [31:55<00:32, 3.65s/it] {'loss': 1.1339, 'grad_norm': 0.0008715874317368923, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [31:55<00:32, 3.65s/it] 98%|█████████▊| 512/520 [31:58<00:29, 3.64s/it] {'loss': 1.0233, 'grad_norm': 0.0009264918926134664, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [31:58<00:29, 3.64s/it] 99%|█████████▊| 513/520 [32:02<00:25, 3.66s/it] {'loss': 1.2194, 'grad_norm': 0.0009929301645277206, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:02<00:25, 3.66s/it] 99%|█████████▉| 514/520 [32:06<00:21, 3.66s/it] {'loss': 1.1847, 'grad_norm': 0.000831609752910406, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:06<00:21, 3.66s/it] 99%|█████████▉| 515/520 [32:09<00:18, 3.66s/it] {'loss': 1.233, 'grad_norm': 0.0010643907053951944, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:09<00:18, 3.66s/it] 99%|█████████▉| 516/520 [32:13<00:14, 
+ 99%|█████████▉| 516/520 [32:13<00:14, 3.66s/it] {'loss': 1.144, 'grad_norm': 0.0008818849052972241, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:17<00:10, 3.65s/it] {'loss': 1.1723, 'grad_norm': 0.0008460243194560999, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+100%|█████████▉| 518/520 [32:20<00:07, 3.64s/it] {'loss': 1.1528, 'grad_norm': 0.0009683097788345709, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+100%|█████████▉| 519/520 [32:24<00:03, 3.63s/it] {'loss': 1.1439, 'grad_norm': 0.0008698075829666415, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+100%|██████████| 520/520 [32:28<00:00, 3.89s/it] {'loss': 1.1382, 'grad_norm': 0.0008982425364588405, 'learning_rate': 0.0, 'epoch': 1.0}
+{'train_runtime': 1948.8247, 'train_samples_per_second': 34.138, 'train_steps_per_second': 0.267, 'train_loss': 1.2242424825063118, 'epoch': 1.0}
+100%|██████████| 520/520 [32:28<00:00, 3.75s/it]
+[2025-10-13 19:12:29,166] [INFO] [launch.py:348:main] Process 967821 exits successfully.
+[2025-10-13 19:12:29,167] [INFO] [launch.py:348:main] Process 967822 exits successfully.
+[2025-10-13 19:12:30,168] [INFO] [launch.py:348:main] Process 967824 exits successfully.
+[2025-10-13 19:12:30,169] [INFO] [launch.py:348:main] Process 967818 exits successfully.
+[2025-10-13 19:12:31,170] [INFO] [launch.py:348:main] Process 967823 exits successfully.
+[2025-10-13 19:12:31,171] [INFO] [launch.py:348:main] Process 967819 exits successfully.
+[2025-10-13 19:12:31,171] [INFO] [launch.py:348:main] Process 967820 exits successfully.
+[2025-10-13 19:12:34,175] [INFO] [launch.py:348:main] Process 967817 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.5_2e-1_connector-5.0_1.5_2e-1_ablation_20251013_183828.log
+Timestamp: 2025-10-13 19:12:36
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation_20251013_191236.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation_20251013_191236.log
new file mode 100644
index 0000000000000000000000000000000000000000..410d06f01fd28a30be5a0bcba336bfe2bbd1f35b
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation_20251013_191236.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation_20251013_191236.log
+Timestamp: 2025-10-13 19:12:36
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 19:12:39,348] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 19:12:42,759] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 19:12:42,761] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2  --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 1.7 --temperature_mlp_text 1.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 1.7 --temperature_mlp_vision 1.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 1.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 19:12:45,374] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 19:12:46,410] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 19:12:46,410] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 19:12:46,410] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 19:12:46,411] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 19:12:46,411] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 19:12:46,411] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 19:12:46,411] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 19:12:46,413] [INFO] [launch.py:253:main] process 987599 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
[launch.py:253:main] process 987600 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:12:46,417] [INFO] [launch.py:253:main] process 987601 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:12:46,419] [INFO] [launch.py:253:main] process 987602 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:12:46,421] [INFO] [launch.py:253:main] process 987603 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-13 19:12:46,424] [INFO] [launch.py:253:main] process 987604 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', ... same arguments as the ranks above ...]
+[2025-10-13 19:12:46,426] [INFO] [launch.py:253:main] process 987605 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', ... same arguments ...]
+[2025-10-13 19:12:46,428] [INFO] [launch.py:253:main] process 987606 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', ... same arguments ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
[... the same pynvml FutureWarning is emitted once per spawned rank; seven duplicate copies elided ...]
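For readability, a minimal sketch of the spawn pattern the launch.py lines above record: one training process per GPU, each receiving the identical argument list plus its own --local_rank. This is illustrative only, not the actual DeepSpeed launcher code, and the argument list is abbreviated.

import subprocess

# Hypothetical reconstruction of the per-rank spawn pattern logged above.
base = ["/opt/conda/envs/tinyllava/bin/python3.10", "-u", "tinyllava/train/train.py"]
shared_args = [
    "--deepspeed", "./scripts/zero3.json",
    "--mask_model", "llm-connector",
    "--init_mean_text", "5.0", "--temperature_attn_text", "1.7",
    "--learning_rate", "2e-1", "--train_data_ratio", "0.1",
    # ... the remaining flags exactly as logged ...
]
procs = [
    subprocess.Popen(base + [f"--local_rank={r}"] + shared_args)
    for r in range(8)  # one worker per GPU
]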
+[2025-10-13 19:12:53,214] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[... the same auto-detect line is logged by each of the eight ranks; duplicates elided ...]
+[2025-10-13 19:12:53,645] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 19:12:53,645] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
[... cdb=None is likewise logged once per rank; duplicates elided ...]
+Apply masks for the following modules: ['llm', 'connector']
[... printed by every rank; the remaining copies, including two garbled by interleaved stdout, elided ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+  warnings.warn(
[... this FutureWarning repeats once per rank; duplicates elided ...]
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.7, 'temperature_mlp': 1.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.7, 'mask_type': 'soft', 'backward_type': 'normal'}}
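The dict above is the mask configuration handed to model construction: each masked module gets a mask_type ('soft'), a temperature (1.7 in this run), and a backward_type. A minimal sketch of what a soft, temperature-scaled weight mask typically computes; this is an assumption for illustration, not the repository's actual SupermaskLinearSparsity_SoftForward_Normal implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    # Illustrative stand-in for a soft supermask linear layer.
    def __init__(self, in_features, out_features, temperature=1.7, init_mean=5.0):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)
        # Mask scores start at init_mean (cf. --init_mean_* 5.0), so
        # sigmoid(5.0 / 1.7) ~= 0.95 and the mask begins almost fully open.
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask)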
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.7,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.7,
+    "temperature_mlp": 1.7,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
[... each of the three messages above is emitted once per rank; the duplicate copies are elided ...]
+ywang29-vrdb-test1-worker-0:987599:987599 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:987599:987599 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:987599:987599 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:987599:987599 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:987599:987599 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:987599:987599 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
[... ranks 1-7 log the same bootstrap/plugin sequence; duplicates elided ...]
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO Using network Socket
[... identical network-selection lines from ranks 1-7 elided ...]
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO ncclCommInitRank comm 0x5633b7a032e0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x23e63267fb4f81f2 - Init START
[... the matching Init START lines for ranks 1-7, all with commId 0x23e63267fb4f81f2, are elided ...]
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO NVLS multicast support is not available on dev 0
[... GPUs 1-3 share the ff,ffff0000,00ffffff affinity mask and GPUs 4-7 use ffffff00,0000ffff,ff000000; the per-GPU affinity and NVLS lines are elided ...]
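The ncclCommInitRank lines above show the 8-rank communicator being created after DeepSpeed initialized its torch NCCL backend. A minimal sketch of the equivalent process-group setup in plain PyTorch; illustrative only, though deepspeed.init_distributed() performs roughly this under the hood.

import os
import torch
import torch.distributed as dist

# Sketch of the initialization reported by the comm.py lines earlier.
local_rank = int(os.environ.get("LOCAL_RANK", "0"))
torch.cuda.set_device(local_rank)
dist.init_process_group(backend="nccl")  # honors NCCL_SOCKET_IFNAME=eth seen above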
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO comm 0x5633b7a032e0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
[... the comm summary lines for ranks 1-7 are elided ...]
+ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [...] [23] 2/-1/-1->1->0
[... every rank r reports the same chain tree (child r+1, parent r-1) on all 24 channels; rank 0 has no parent and rank 7 no child; the remaining Trees lines are elided ...]
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
[... channels 01/24 through 23/24 use the identical ring order 0 1 2 3 4 5 6 7; those lines are elided ...]
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO P2P Chunksize set to 524288
[... the P2P Chunksize line is repeated by every rank; duplicates elided ...]
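The Trees and Channel lines describe one topology reused across all 24 channels: a single ring 0 -> 1 -> ... -> 7 -> 0, with the tree degenerating to the same chain. A tiny sketch of that connection pattern, for illustration only:

nranks = 8
ring = [(r, (r + 1) % nranks) for r in range(nranks)]
# ring == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 0)],
# matching the "r[r] -> r+1[r+1] via P2P/CUMEM/read" connections below.
tree_parent = [r - 1 if r > 0 else -1 for r in range(nranks)]
# tree_parent == [-1, 0, 1, 2, 3, 4, 5, 6]: the chain the Trees lines encode.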
+ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
[... every rank r connects to rank (r+1) mod 8 on each of the 24 channels via P2P/CUMEM/read; the remaining forward-connection lines are elided ...]
+ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO Connected all rings
[... the "Connected all rings" message follows from each of the eight ranks; duplicates elided ...]
+ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
[... the reverse connections, rank r -> r-1 on each channel via P2P/CUMEM/read, continue in the same pattern ...]
+ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO 24 coll channels, 24 collnet 
channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987606:989216 [7] NCCL INFO ncclCommInitRank comm 0x55a84bebdbf0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987604:989217 [5] NCCL INFO ncclCommInitRank comm 0x557a21bbf7d0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987602:989215 [3] NCCL INFO ncclCommInitRank comm 0x55f68cf03d10 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987599:989181 [0] NCCL INFO ncclCommInitRank comm 0x5633b7a032e0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987600:989212 [1] NCCL INFO ncclCommInitRank comm 0x559a06d17ba0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987601:989182 [2] NCCL INFO ncclCommInitRank comm 0x5622c7b04d20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:987603:989214 [4] NCCL INFO ncclCommInitRank comm 0x5630fbb93050 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x23e63267fb4f81f2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:987605:989213 [6] NCCL INFO ncclCommInitRank comm 0x558472850560 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x23e63267fb4f81f2 - Init COMPLETE +[2025-10-13 19:13:39,492] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-13 19:13:41,288] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
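The "newly initialized" warning above is expected for this run: the `.scores` tensors are the supermask parameters that the mask-tuning setup adds alongside each frozen linear weight, so they cannot exist in the pretrained `language_model` checkpoint and are created fresh on every load. As a rough sketch of what the `SupermaskLinearSparsity_SoftForward_Normal` modules in the dump below might compute — assuming, from the `SoftForward` naming, the temperature of 1.3 in the run name, and the `Pre-training init ... Mean=5.000000` lines further down, that a temperature-scaled sigmoid of per-weight scores gates each frozen weight; the repository's actual implementation may differ:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Illustrative stand-in for SupermaskLinearSparsity_SoftForward_Normal
    (hypothetical internals): frozen weight, trainable per-weight scores."""

    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One score per weight entry, initialized to 5.0, matching the
        # "Pre-training init ... Mean=5.000000" lines in this log.
        self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
        self.weight.requires_grad_(False)  # only the scores are trained

    def forward(self, x):
        # Soft forward: sigmoid(5.0 / 1.3) ~= 0.979, so the initial mask
        # leaves the network almost dense and training sparsifies it.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Because each `scores` tensor has the same shape as its weight, the trainable-parameter listing further below follows directly from the Qwen2.5-0.5B geometry: 896×896 = 802816 scores for q_proj/o_proj, 896×128 = 114688 for k_proj/v_proj, and 896×4864 = 4358144 for each MLP projection, i.e. 14909440 per decoder layer; 24 layers plus the two connector score tensors (1152×896 = 1032192 and 896×896 = 802816) give exactly the reported 359661568 trainable parameters.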
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000 
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000 
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: 
Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: 
Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000
+Pre-training init connector._connector.0.scores: Mean=5.000005
+Pre-training init connector._connector.2.scores: Mean=4.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-13 19:13:59,366 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-13 19:13:59,371 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters 
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters 
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL
INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO P2P Chunksize set to 524288 
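An aside on the score counts listed above: each `scores` tensor is exactly weight-shaped for the projection it masks, and the totals match the weight matrices with no bias terms included (896x896 = 802,816 for q_proj/o_proj, 896x128 = 114,688 for k_proj/v_proj, 896x4,864 = 4,358,144 for each MLP projection, and 1,152x896 = 1,032,192 plus 896x896 for the two connector layers). A minimal sketch reproducing the arithmetic; the dimensions are read off the config printed earlier, and the helper is illustrative rather than code from this repo:

    # Illustrative check that each logged `scores` count equals the size of the
    # weight matrix it masks. Dimensions from the logged config: hidden 896,
    # 2 KV heads x head_dim 64 = 128, MLP intermediate 4864, vision hidden 1152.
    hidden, kv, inter, vision = 896, 128, 4864, 1152

    def score_count(in_features, out_features):
        # One mask score per weight entry; biases appear to be left unmasked.
        return in_features * out_features

    expected = {
        "self_attn.q_proj": score_count(hidden, hidden),        # 802816
        "self_attn.k_proj": score_count(hidden, kv),            # 114688
        "self_attn.v_proj": score_count(hidden, kv),            # 114688
        "self_attn.o_proj": score_count(hidden, hidden),        # 802816
        "mlp.gate_proj":    score_count(hidden, inter),         # 4358144
        "mlp.up_proj":      score_count(hidden, inter),         # 4358144
        "mlp.down_proj":    score_count(inter, hidden),         # 4358144
        "connector._connector.0": score_count(vision, hidden),  # 1032192
        "connector._connector.2": score_count(hidden, hidden),  # 802816
    }
    for name, n in expected.items():
        print(f"{name}.scores: {n} parameters")

Every value printed by this sketch matches the corresponding line in the log, which is consistent with the masks covering all linear weights of the attention, MLP, and connector modules and nothing else.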
+ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
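The `via P2P/CUMEM/read` suffix on these lines indicates NCCL is exchanging data through direct GPU-to-GPU peer memory access (with cuMem-allocated buffers) rather than staging through host memory. If one wanted to verify outside this run that the node actually exposes all-pairs peer access, a quick PyTorch check (a standalone sketch, not part of this log's tooling) would be:

    import torch

    # Confirm the pairwise GPU peer access that NCCL's P2P transport relies on.
    n = torch.cuda.device_count()
    for src in range(n):
        for dst in range(n):
            if src != dst and not torch.cuda.can_device_access_peer(src, dst):
                print(f"no peer access: GPU {src} -> GPU {dst}")
    print(f"checked {n} GPUs for pairwise peer access")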
+ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
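By this point every forward ring edge r -> r+1 (plus the closing edge 7 -> 0) has been opened on all 24 channels, and the Trees entries earlier show a plain chain (rank r's parent is r-1 and its child is r+1); the reverse edges that follow connect the trees. Channel-by-channel detail at this verbosity is what NCCL emits when NCCL_DEBUG=INFO is set. A throwaway sketch (file name and regex are illustrative, not from this codebase) for summarizing these connection lines when auditing such a log:

    import re
    from collections import defaultdict

    # Tally NCCL point-to-point connections per (src, dst, transport) triple.
    pat = re.compile(r"Channel (\d+)/0 : (\d+)\[\d+\] -> (\d+)\[\d+\] via (\S+)")
    links = defaultdict(set)
    with open("eval.log") as f:  # illustrative file name
        for line in f:
            m = pat.search(line)
            if m:
                ch, src, dst, transport = m.groups()
                links[(int(src), int(dst), transport)].add(int(ch))

    for (src, dst, transport), chans in sorted(links.items()):
        print(f"{src} -> {dst} via {transport}: {len(chans)} channels")

On a log like this one, each ring edge should report 24 channels, matching the "Channel 00/24" through "Channel 23/24" enumeration above.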
+ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO 
24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:987601:994232 [2] NCCL INFO ncclCommInitRank comm 0x7fa17006aca0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987599:994227 [0] NCCL INFO ncclCommInitRank comm 0x7fbe0406b280 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987603:994234 [4] NCCL INFO ncclCommInitRank comm 0x7fec4006af80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987606:994231 [7] NCCL INFO ncclCommInitRank comm 0x7fd728069fa0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987605:994229 [6] NCCL INFO ncclCommInitRank comm 0x7f4e8806ab50 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987602:994233 [3] NCCL INFO ncclCommInitRank comm 0x7f2c3c06aa80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987600:994230 [1] NCCL INFO ncclCommInitRank comm 0x7f636406af10 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xf6c23e1988e18f27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:987604:994228 [5] NCCL INFO ncclCommInitRank comm 0x7efb7406ac80 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xf6c23e1988e18f27 - Init COMPLETE + 0%| | 1/520 [00:14<2:02:12, 14.13s/it] {'loss': 2.0824, 'grad_norm': 0.016665170868778882, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:02:12, 14.13s/it] 0%| | 2/520 [00:17<1:09:21, 8.03s/it] {'loss': 2.0828, 'grad_norm': 0.0179845041662615, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:17<1:09:21, 8.03s/it] 1%| | 3/520 [00:21<52:41, 6.11s/it] {'loss': 2.2283, 'grad_norm': 0.02060147778855983, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<52:41, 6.11s/it] 1%| | 4/520 [00:25<44:52, 5.22s/it] {'loss': 1.687, 'grad_norm': 0.005395084358805062, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<44:52, 5.22s/it] 1%| | 5/520 [00:29<40:31, 4.72s/it] {'loss': 1.7026, 'grad_norm': 0.005601958939019072, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<40:31, 4.72s/it] 1%| | 6/520 [00:33<37:56, 4.43s/it] {'loss': 1.4055, 'grad_norm': 0.0024168773695884896, 'learning_rate': 0.07500000000000001, 
'epoch': 0.01} + 1%| | 6/520 [00:33<37:56, 4.43s/it] 1%|▏ | 7/520 [00:37<36:18, 4.25s/it] {'loss': 1.4638, 'grad_norm': 0.0031404602811721176, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:37<36:18, 4.25s/it] 2%|▏ | 8/520 [00:41<36:44, 4.31s/it] {'loss': 1.4807, 'grad_norm': 0.002768433574023963, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:41<36:44, 4.31s/it] 2%|▏ | 9/520 [00:45<36:36, 4.30s/it] {'loss': 1.5537, 'grad_norm': 0.0023770128252760567, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<36:36, 4.30s/it] 2%|▏ | 10/520 [00:49<35:12, 4.14s/it] {'loss': 1.3797, 'grad_norm': 0.0019332141322609437, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<35:12, 4.14s/it] 2%|▏ | 11/520 [00:53<34:44, 4.10s/it] {'loss': 1.4499, 'grad_norm': 0.0021655698998717574, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<34:44, 4.10s/it] 2%|▏ | 12/520 [00:57<33:48, 3.99s/it] {'loss': 1.3575, 'grad_norm': 0.0022304722454124233, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<33:48, 3.99s/it][2025-10-13 19:15:06,068] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<34:57, 4.14s/it] {'loss': 1.3911, 'grad_norm': 0.002297723930373371, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<34:57, 4.14s/it] 3%|▎ | 14/520 [01:05<34:08, 4.05s/it] {'loss': 1.4287, 'grad_norm': 0.0017610352805347861, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:05<34:08, 4.05s/it] 3%|▎ | 15/520 [01:09<33:38, 4.00s/it] {'loss': 1.3946, 'grad_norm': 0.0013124472377384845, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:09<33:38, 4.00s/it] 3%|▎ | 16/520 [01:13<33:05, 3.94s/it] {'loss': 1.3839, 'grad_norm': 0.001994117221469018, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:13<33:05, 3.94s/it] 3%|▎ | 17/520 [01:17<32:42, 3.90s/it] {'loss': 1.4636, 'grad_norm': 0.0018957734538322221, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:17<32:42, 3.90s/it] 3%|▎ | 18/520 [01:20<32:07, 3.84s/it] {'loss': 1.312, 'grad_norm': 0.001475558397739344, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:20<32:07, 3.84s/it] 4%|▎ | 19/520 [01:24<31:43, 3.80s/it] {'loss': 1.3501, 'grad_norm': 0.0013428598083171225, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:24<31:43, 3.80s/it] 4%|▍ | 20/520 [01:28<31:40, 3.80s/it] {'loss': 1.2979, 'grad_norm': 0.00153044540810629, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:28<31:40, 3.80s/it] 4%|▍ | 21/520 [01:32<31:46, 3.82s/it] {'loss': 1.3439, 'grad_norm': 0.0016309109303863718, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:32<31:46, 3.82s/it] 4%|▍ | 22/520 [01:36<31:48, 3.83s/it] {'loss': 1.448, 'grad_norm': 0.0013804194302968335, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:36<31:48, 3.83s/it] 4%|▍ | 23/520 [01:39<31:47, 3.84s/it] {'loss': 1.393, 'grad_norm': 0.0013016038660144536, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:39<31:47, 3.84s/it] 5%|▍ | 24/520 
[01:43<31:22, 3.79s/it] {'loss': 1.3284, 'grad_norm': 0.00142567703870315, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:43<31:22, 3.79s/it] 5%|▍ | 25/520 [01:47<30:53, 3.74s/it] {'loss': 1.3863, 'grad_norm': 0.001412077589711011, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:47<30:53, 3.74s/it] 5%|▌ | 26/520 [01:51<30:43, 3.73s/it] {'loss': 1.3561, 'grad_norm': 0.0012462497614633117, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:51<30:43, 3.73s/it] 5%|▌ | 27/520 [01:54<30:29, 3.71s/it] {'loss': 1.2788, 'grad_norm': 0.001269185930935037, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:54<30:29, 3.71s/it] 5%|▌ | 28/520 [01:58<30:24, 3.71s/it] {'loss': 1.2933, 'grad_norm': 0.0012973020155920996, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:58<30:24, 3.71s/it] 6%|▌ | 29/520 [02:02<30:17, 3.70s/it] {'loss': 1.3229, 'grad_norm': 0.0013892332293668332, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:02<30:17, 3.70s/it] 6%|▌ | 30/520 [02:05<30:13, 3.70s/it] {'loss': 1.4005, 'grad_norm': 0.001184749450551891, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:05<30:13, 3.70s/it] 6%|▌ | 31/520 [02:09<30:06, 3.69s/it] {'loss': 1.2935, 'grad_norm': 0.00115262205274598, 'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:09<30:06, 3.69s/it] 6%|▌ | 32/520 [02:13<30:09, 3.71s/it] {'loss': 1.2393, 'grad_norm': 0.0011551160799861166, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:13<30:09, 3.71s/it] 6%|▋ | 33/520 [02:17<30:22, 3.74s/it] {'loss': 1.2959, 'grad_norm': 0.0013897286128557581, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:17<30:22, 3.74s/it] 7%|▋ | 34/520 [02:20<30:47, 3.80s/it] {'loss': 1.2953, 'grad_norm': 0.0015467734210601082, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:20<30:47, 3.80s/it] 7%|▋ | 35/520 [02:24<31:00, 3.84s/it] {'loss': 1.2952, 'grad_norm': 0.0015264339409488891, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:24<31:00, 3.84s/it] 7%|▋ | 36/520 [02:28<31:01, 3.85s/it] {'loss': 1.3853, 'grad_norm': 0.0012489115280999439, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:28<31:01, 3.85s/it] 7%|▋ | 37/520 [02:32<30:40, 3.81s/it] {'loss': 1.3771, 'grad_norm': 0.001315645549286764, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:32<30:40, 3.81s/it] 7%|▋ | 38/520 [02:36<30:16, 3.77s/it] {'loss': 1.4638, 'grad_norm': 0.001331103878396419, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:36<30:16, 3.77s/it] 8%|▊ | 39/520 [02:39<30:05, 3.75s/it] {'loss': 1.32, 'grad_norm': 0.0014521852793478965, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:39<30:05, 3.75s/it] 8%|▊ | 40/520 [02:43<30:15, 3.78s/it] {'loss': 1.352, 'grad_norm': 0.0011908286814028926, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:43<30:15, 3.78s/it] 8%|▊ | 41/520 [02:47<30:35, 3.83s/it] {'loss': 1.3309, 'grad_norm': 0.0012879215819825531, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:47<30:35, 3.83s/it] 8%|▊ | 42/520 [02:51<30:48, 3.87s/it] {'loss': 1.3315, 'grad_norm': 0.0015424014865416346, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:51<30:48, 3.87s/it] 8%|▊ | 43/520 [02:55<30:55, 3.89s/it] {'loss': 1.2757, 'grad_norm': 0.0012407107824150088, 
'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+[... steps 43-399 of 520 at ~3.7-4.1 s/it: duplicated tqdm redraw lines dropped and per-step records condensed to every 50th step; loss trends down from ~1.37 to ~1.15 (grad_norm ~1e-3 throughout) as the learning rate follows its cosine decay from 1.986e-1 to 2.71e-2, epoch 0.08 -> 0.77 ...]
+ 10%|▉ | 50/520 [03:22<30:37, 3.91s/it] {'loss': 1.346, 'grad_norm': 0.0011925647445793792, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 19%|█▉ | 100/520 [06:34<26:57, 3.85s/it] {'loss': 1.2469, 'grad_norm': 0.0010936943977610115, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 29%|██▉ | 150/520 [09:41<22:32, 3.65s/it] {'loss': 1.4153, 'grad_norm': 0.0010966480286452851, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 38%|███▊ | 200/520 [12:47<20:07, 3.77s/it] {'loss': 1.1665, 'grad_norm': 0.0011468331141604115, 'learning_rate': 0.14112871031306118, 'epoch': 0.38}
+ 48%|████▊ | 250/520 [15:56<16:35, 3.69s/it] {'loss': 1.1955, 'grad_norm': 0.0010575445943662122, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 58%|█████▊ | 300/520 [19:00<13:29, 3.68s/it] {'loss': 1.2776, 'grad_norm': 0.0010475533734697795, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 67%|██████▋ | 350/520 [22:11<10:40, 3.77s/it] {'loss': 1.1841, 'grad_norm': 0.001066683260033554, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 77%|███████▋ | 400/520 [25:16<07:26, 3.72s/it] {'loss': 1.1682, 'grad_norm': 0.0008757689073518607,
'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:16<07:26, 3.72s/it] 77%|███████▋ | 401/520 [25:20<07:22, 3.72s/it] {'loss': 1.0215, 'grad_norm': 0.0010554990328810916, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:20<07:22, 3.72s/it] 77%|███████▋ | 402/520 [25:23<07:17, 3.71s/it] {'loss': 1.1472, 'grad_norm': 0.0010277150997106195, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:23<07:17, 3.71s/it] 78%|███████▊ | 403/520 [25:27<07:13, 3.70s/it] {'loss': 1.1727, 'grad_norm': 0.0010863662526039162, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:27<07:13, 3.70s/it] 78%|███████▊ | 404/520 [25:31<07:10, 3.71s/it] {'loss': 1.0815, 'grad_norm': 0.001137519881490665, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:31<07:10, 3.71s/it] 78%|███████▊ | 405/520 [25:34<07:06, 3.70s/it] {'loss': 1.1488, 'grad_norm': 0.0009846395771243549, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:34<07:06, 3.70s/it] 78%|███████▊ | 406/520 [25:38<07:02, 3.71s/it] {'loss': 1.0653, 'grad_norm': 0.0012611020051968684, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:38<07:02, 3.71s/it] 78%|███████▊ | 407/520 [25:42<06:58, 3.71s/it] {'loss': 1.249, 'grad_norm': 0.001038394931828285, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:42<06:58, 3.71s/it] 78%|███████▊ | 408/520 [25:46<06:53, 3.69s/it] {'loss': 1.1626, 'grad_norm': 0.0011641234040603607, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:46<06:53, 3.69s/it] 79%|███████▊ | 409/520 [25:49<06:48, 3.68s/it] {'loss': 1.2749, 'grad_norm': 0.0010773111461659624, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:49<06:48, 3.68s/it] 79%|███████▉ | 410/520 [25:53<06:44, 3.68s/it] {'loss': 1.013, 'grad_norm': 0.0009979483778023049, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:53<06:44, 3.68s/it] 79%|███████▉ | 411/520 [25:57<06:40, 3.67s/it] {'loss': 1.2593, 'grad_norm': 0.0011019391231140345, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:57<06:40, 3.67s/it] 79%|███████▉ | 412/520 [26:00<06:36, 3.67s/it] {'loss': 1.1669, 'grad_norm': 0.0010267383814298154, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:00<06:36, 3.67s/it] 79%|███████▉ | 413/520 [26:04<06:32, 3.67s/it] {'loss': 1.1593, 'grad_norm': 0.0009553825237592565, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:04<06:32, 3.67s/it] 80%|███████▉ | 414/520 [26:08<06:30, 3.68s/it] {'loss': 0.9733, 'grad_norm': 0.0008187154894774925, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:08<06:30, 3.68s/it] 80%|███████▉ | 415/520 [26:11<06:26, 3.68s/it] {'loss': 1.1475, 'grad_norm': 0.0009326331521726593, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:11<06:26, 3.68s/it] 80%|████████ | 416/520 [26:15<06:22, 3.68s/it] {'loss': 1.0654, 'grad_norm': 0.0010811746388789917, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:15<06:22, 3.68s/it] 80%|████████ | 417/520 [26:19<06:18, 3.67s/it] {'loss': 1.2222, 'grad_norm': 0.0010749032838470315, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:19<06:18, 3.67s/it] 80%|████████ | 
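The two "Token indices sequence length" lines in this span are transformers' standard overflow warning: it fires when the tokenizer encodes a sample longer than the model's configured maximum (2048 here) without truncation, and it is harmless as long as the pipeline truncates before the forward pass. A minimal sketch of what triggers and silences it, assuming a stock Hugging Face tokenizer (illustrative only; the training code may handle overlong samples differently):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
    long_text = "word " * 3000  # well past the 2048-token limit, like the 2778-token sample above
    ids = tok(long_text, truncation=True, max_length=2048)["input_ids"]  # truncation=True suppresses the warning
    assert len(ids) <= 2048  # no overlong sequence reaches the model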
+ [steps 418-429/520 elided: loss ~1.03-1.24, grad_norm ~1e-3, learning_rate 0.0195 -> 0.0157, ~3.7 s/it]
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ [steps 430-515/520 elided: loss ~0.95-1.29, grad_norm ~1e-3, learning_rate 0.0153 -> 4.9e-05, ~3.7-3.9 s/it]
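Across the elided steps the learning_rate column decays smoothly from ~0.058 down to zero at step 520, consistent with transformers' cosine schedule with linear warmup at a 0.2 peak (the companion run below is launched with --lr_scheduler_type cosine --warmup_ratio 0.03, and 0.03 * 520 rounds up to 16 warmup steps). A minimal reconstruction, with the warmup-step rounding taken as an assumption:

    import math

    def cosine_lr(step, base_lr=2e-1, total_steps=520, warmup_steps=16):
        """Cosine decay with linear warmup, mirroring the HF schedule shape."""
        if step < warmup_steps:
            return base_lr * step / max(1, warmup_steps)
        progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
        return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

As a spot check, cosine_lr(352) returns exactly 0.05, matching the 0.050000000000000024 logged at step 352 above, and cosine_lr(520) returns 0.0, matching the final step.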
+ [steps 516-519/520 elided: loss ~1.14-1.18, learning_rate 3.1e-05 -> 1.9e-06]
+ 100%|██████████| 520/520 [32:47<00:00, 3.94s/it] {'loss': 1.1416, 'grad_norm': 0.000984177327906943, 'learning_rate': 0.0, 'epoch': 1.0}
+ {'train_runtime': 1967.2362, 'train_samples_per_second': 33.819, 'train_steps_per_second': 0.264, 'train_loss': 1.2309551063638466, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:47<00:00, 3.78s/it]
+[2025-10-13 19:46:57,609] [INFO] [launch.py:348:main] Process 987602 exits successfully.
+[2025-10-13 19:46:57,609] [INFO] [launch.py:348:main] Process 987600 exits successfully.
+[2025-10-13 19:46:57,610] [INFO] [launch.py:348:main] Process 987601 exits successfully.
+[2025-10-13 19:46:58,611] [INFO] [launch.py:348:main] Process 987605 exits successfully.
+[2025-10-13 19:46:58,612] [INFO] [launch.py:348:main] Process 987606 exits successfully.
+[2025-10-13 19:46:58,612] [INFO] [launch.py:348:main] Process 987603 exits successfully.
+[2025-10-13 19:46:58,612] [INFO] [launch.py:348:main] Process 987604 exits successfully.
+[2025-10-13 19:47:02,617] [INFO] [launch.py:348:main] Process 987599 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.7_2e-1_connector-5.0_1.7_2e-1_ablation_20251013_191236.log
+Timestamp: 2025-10-13 19:47:05
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation_20251013_194705.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation_20251013_194705.log
new file mode 100644
index 0000000000000000000000000000000000000000..d3b37b4903783f5eefd4eb37d2a0d18e182847a8
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation_20251013_194705.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation_20251013_194705.log
+Timestamp: 2025-10-13 19:47:05
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 19:47:07,755] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 19:47:10,424] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-13 19:47:10,426] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 1.9 --temperature_mlp_text 1.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 1.9 --temperature_mlp_vision 1.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 1.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
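The flag set above also explains the 520-step epoch in the run that just ended: 8 local ranks, per-device batch 4, and gradient accumulation 4 give an effective batch of 128, and --train_data_ratio 0.1 of the mix665k list yields roughly 520 optimizer steps. A worked check; the exact 665,298-sample count for llava_v1_5_mix665k.json is an assumption, though the logged train_samples_per_second * train_runtime (33.819 * 1967.2 ≈ 66.5k) is consistent with it:

    import math

    world_size = 8       # CUDA_VISIBLE_DEVICES=0..7, single node
    per_device = 4       # --per_device_train_batch_size 4
    grad_accum = 4       # --gradient_accumulation_steps 4
    effective_batch = world_size * per_device * grad_accum  # 128
    samples = int(665_298 * 0.1)                            # --train_data_ratio 0.1 (sample count assumed)
    print(math.ceil(samples / effective_batch))             # -> 520, matching "520/520" above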
+[2025-10-13 19:47:13,043] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 19:47:14,074] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-13 19:47:14,074] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-13 19:47:14,074] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-13 19:47:14,074] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-13 19:47:14,074] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-13 19:47:14,074] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-13 19:47:14,074] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-13 19:47:14,077] [INFO] [launch.py:253:main] process 1007641 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', ...] [argv identical to the runner cmd above, split into a list; full flag dump elided]
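The --world_info blob in the runner cmd is simply base64-encoded JSON; decoding it reproduces the WORLD INFO DICT line above (deepspeed itself uses a urlsafe base64 decode, which is identical for this string):

    import base64, json

    blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
    print(json.loads(base64.b64decode(blob)))
    # -> {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}, matching WORLD INFO DICT above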
[launch.py:253:main] process 1007642 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:47:14,081] [INFO] [launch.py:253:main] process 1007643 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:47:14,083] [INFO] [launch.py:253:main] process 1007644 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:47:14,085] [INFO] [launch.py:253:main] process 1007645 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:47:14,087] [INFO] [launch.py:253:main] process 1007646 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:47:14,090] [INFO] [launch.py:253:main] process 1007647 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 19:47:14,092] [INFO] [launch.py:253:main] process 1007648 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import]
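The eight spawn commands above are identical except for --local_rank (0-7); every other flag, including the masking hyperparameters (init_mean 5.0, temperature 1.9, learning rate 2e-1), is shared across ranks, and the pynvml FutureWarning is then emitted by each process as it imports torch. A minimal sketch of what the launcher is doing, written as a hypothetical standalone loop (not DeepSpeed's actual launch.py):

import subprocess, sys

# One trainer process per local GPU; argv identical apart from --local_rank.
# shared_args stands in for the long flag list logged above.
shared_args = ["--deepspeed", "./scripts/zero3.json", "--seed", "42"]
procs = [
    subprocess.Popen([sys.executable, "-u", "tinyllava/train/train.py",
                      f"--local_rank={rank}"] + shared_args)
    for rank in range(8)
]
for p in procs:
    p.wait()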
[2025-10-13 19:47:20,603] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:20,835] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:20,928] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:21,015] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,015] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-13 19:47:21,038] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:21,056] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:21,056] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:21,079] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:21,106] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 19:47:21,245] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,339] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,449] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,458] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,461] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,485] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-13 19:47:21,511] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.9, 'temperature_mlp': 1.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.9, 'mask_type': 'soft', 'backward_type': 'normal'}}
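The mask configuration dict above ('mask_type': 'soft', temperatures of 1.9, 'backward_type': 'normal') points to a sigmoid-relaxed supermask: frozen weights scaled elementwise by sigmoid(score / temperature), with only the scores trained. A minimal sketch under that reading (class and parameter names are illustrative, not the repo's actual SupermaskLinearSparsity_SoftForward_Normal implementation):

import torch
import torch.nn as nn

class SoftMaskedLinear(nn.Module):
    # Frozen weight matrix plus one learnable score per weight; the soft mask
    # sigmoid(score / T) stays in (0, 1) and is trained with normal gradients.
    def __init__(self, in_features, out_features, temperature=1.9, init_mean=5.0):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features), requires_grad=False)
        nn.init.kaiming_uniform_(self.weight)
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return nn.functional.linear(x, self.weight * mask)

With init_mean 5.0 (the '--init_mean_*' flags in the spawn commands), the initial mask is sigmoid(5.0 / 1.9) ≈ 0.93, so training starts close to the dense model.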
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 1.9, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 1.9, + "temperature_mlp": 1.9, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
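The Flash Attention 2.0 warning above is expected when the model is first constructed on CPU, as DeepSpeed ZeRO-3 does before sharding parameters to the GPUs, and is harmless in this launcher context. In a standalone script the usual remedy is to move the model after construction; a sketch against the Hugging Face transformers API in use here (4.40.1 per the config dump):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,               # FlashAttention-2 requires fp16/bf16
    attn_implementation="flash_attention_2",
)
model.to("cuda")  # moving to GPU after CPU init silences the warning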
+ywang29-vrdb-test1-worker-0:1007641:1007641 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007641:1007641 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007641:1007641 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007641:1007641 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007641:1007641 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1007641:1007641 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1007647:1007647 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007647:1007647 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007647:1007647 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007647:1007647 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007647:1007647 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007647:1007647 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1007646:1007646 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007646:1007646 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007646:1007646 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007646:1007646 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007646:1007646 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007646:1007646 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1007645:1007645 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007645:1007645 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007645:1007645 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007645:1007645 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007645:1007645 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007645:1007645 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1007642:1007642 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007642:1007642 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007642:1007642 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007642:1007642 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007642:1007642 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007642:1007642 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Using network Socket +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. 
This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1007643:1007643 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007643:1007643 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007643:1007643 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007643:1007643 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007643:1007643 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007643:1007643 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1007644:1007644 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007644:1007644 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007644:1007644 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007644:1007644 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007644:1007644 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007644:1007644 [3] NCCL INFO NET/Plugin: Using internal network plugin. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO NET/IB : No device found. 
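The NCCL bootstrap lines show every rank converging on the same transport: NCCL_SOCKET_IFNAME=eth pins discovery to eth0, no InfiniBand device is found, so bootstrap falls back to plain sockets while intra-node traffic uses GPU P2P. A minimal sketch of the initialization each rank performs to reach this point (hypothetical standalone code; the real call sites are inside DeepSpeed's comm.py):

import os
import torch
import torch.distributed as dist

# Assumes MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE/LOCAL_RANK are set by the launcher.
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")  # matches "set by environment to eth"
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
dist.init_process_group(backend="nccl")  # triggers the ncclCommInitRank lines above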
+ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1007648:1007648 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1007648:1007648 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007648:1007648 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007648:1007648 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1007648:1007648 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1007648:1007648 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO ncclCommInitRank comm 0x5568313fa630 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO ncclCommInitRank comm 0x5598df5d78d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO ncclCommInitRank comm 0x55bb1385f540 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO ncclCommInitRank comm 0x557f36f3e880 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO ncclCommInitRank comm 0x55974a6baa60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO ncclCommInitRank comm 0x557deed306f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO ncclCommInitRank comm 0x5589cd531f90 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO ncclCommInitRank comm 0x5596732e1c10 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xbc0ec83e8fc6382b - Init START +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] 
NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO comm 0x55bb1385f540 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO comm 0x5596732e1c10 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO comm 0x5598df5d78d0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO comm 0x557deed306f0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO comm 0x557f36f3e880 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO comm 0x5568313fa630 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO comm 0x55974a6baa60 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO comm 0x5589cd531f90 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO P2P Chunksize set to 
524288 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 
11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL 
INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
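Across all 24 channels NCCL builds the same single-node ring 0 -> 1 -> ... -> 7 -> 0 (the "7[7] -> 0[0]" edges are the wrap-around) and a tree whose parent chain is 0 <- 1 <- ... <- 7. The neighbor arithmetic behind those channel lines is plain modular indexing, as a toy check:

nranks = 8
for rank in range(nranks):
    send_to = (rank + 1) % nranks    # ring successor, e.g. 7 -> 0
    recv_from = (rank - 1) % nranks  # ring predecessor
    print(f"rank {rank}: sends to {send_to}, receives from {recv_from}")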
+ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL 
INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO 
Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL 
INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL 
INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO 24 coll 
channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1007644:1009245 [3] NCCL INFO ncclCommInitRank comm 0x5568313fa630 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1007648:1009246 [7] NCCL INFO ncclCommInitRank comm 0x5598df5d78d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1007645:1009225 [4] NCCL INFO ncclCommInitRank comm 0x5589cd531f90 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007641:1009206 [0] NCCL INFO ncclCommInitRank comm 0x557deed306f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1007642:1009226 [1] NCCL INFO ncclCommInitRank comm 0x5596732e1c10 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007646:1009224 [5] NCCL INFO ncclCommInitRank comm 0x55974a6baa60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1007647:1009223 [6] NCCL INFO ncclCommInitRank comm 0x557f36f3e880 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
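The NCCL lines above show the 8 local GPUs being wired into 24 communication channels, first as rings (0 -> 1 -> ... -> 7 -> 0) and then as trees, with every hop using direct CUDA P2P over the CUMEM transport. The "Plugin load returned 11 : libnccl-net.so ..." messages are a benign fallback: NCCL looks for an external net/tuner plugin and, finding none, uses its internal tuner. The sheer volume of these lines comes from running with NCCL_DEBUG=INFO; NCCL_DEBUG=WARN would suppress them. P2P reachability between GPU pairs can also be probed independently of NCCL; the sketch below is a hypothetical standalone check (not part of this evaluation script) using PyTorch's torch.cuda.can_device_access_peer:

```python
import torch

# Hypothetical probe, not from this run: list which GPU pairs support the
# direct peer-to-peer access behind the "via P2P/CUMEM/read" hops above.
n = torch.cuda.device_count()
for src in range(n):
    peers = [dst for dst in range(n)
             if dst != src and torch.cuda.can_device_access_peer(src, dst)]
    print(f"GPU {src} -> P2P peers: {peers}")
```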
+ywang29-vrdb-test1-worker-0:1007643:1009244 [2] NCCL INFO ncclCommInitRank comm 0x55bb1385f540 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xbc0ec83e8fc6382b - Init COMPLETE +[2025-10-13 19:48:04,388] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
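This "newly initialized" warning (printed identically by each rank) is expected for this ablation: the masked projection layers carry a learnable scores tensor per weight matrix, and the dense pretrained checkpoint contains no such parameters, so from_pretrained has to initialize every *.scores key from scratch. Presumably they are then restored from the tuned mask checkpoint rather than used as freshly initialized. A minimal sketch of how such a layer might register its scores, assuming the standard soft-supermask recipe (the class name, initialization, and temperature here are illustrative, not the repo's actual implementation):

```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinearSketch(nn.Linear):
    """Illustrative soft-masked linear layer (hypothetical)."""

    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One learnable score per weight entry. This parameter does not
        # exist in a dense checkpoint, which is exactly why from_pretrained
        # reports every *.scores tensor above as newly initialized.
        self.scores = nn.Parameter(torch.empty_like(self.weight))
        nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))

    def forward(self, x):
        # Soft forward: a sigmoid mask, sharpened by the temperature,
        # scales the pretrained weights instead of hard-pruning them.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```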
'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 
'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 
'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 
'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 19:48:06,237] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
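The `*.scores` tensors reported as newly initialized in the warning above are the supermask logits that this masktune run trains: they do not exist in the pretrained language-model checkpoint, so Transformers re-initializes them and emits the warning. A minimal sketch of how a soft-forward supermask linear layer of this kind can work is given here; the class name, the sigmoid-with-temperature mask, and the constant 5.0 init are illustrative assumptions, not the project's actual implementation (the real modules appear as SupermaskLinearSparsity_SoftForward_Normal in the model printout below).

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Module):
    """Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal."""

    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__()
        # The pretrained weight and bias stay frozen; only `scores` is trained.
        self.weight = nn.Parameter(torch.empty(out_features, in_features), requires_grad=False)
        self.bias = nn.Parameter(torch.zeros(out_features), requires_grad=False) if bias else None
        # One mask logit per weight. A constant init of 5.0 makes
        # sigmoid(5.0 / 1.3) ~ 0.98, i.e. the mask starts nearly fully open,
        # which is what the "Pre-training init ... Mean=5.000000" lines
        # later in this log record.
        self.scores = nn.Parameter(torch.full((out_features, in_features), 5.0))
        self.temperature = temperature
        nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)

    def forward(self, x):
        # Soft forward: modulate the frozen weight by a (0, 1) mask instead of
        # hard-thresholding it, so gradients flow to `scores` directly.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)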
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000
+Pre-training init connector._connector.0.scores: Mean=5.000005
+Pre-training init connector._connector.2.scores: Mean=4.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-13 19:48:24,531 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-13 19:48:24,537 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 
4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+[... identical per-layer score listings for layers 9-23 elided; every layer repeats the same pattern: self_attn q_proj/o_proj 802816, self_attn k_proj/v_proj 114688, mlp gate_proj/up_proj/down_proj 4358144 parameters each ...]
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
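Editor's note: the score-tensor sizes above line up exactly with one mask score per weight of each masked projection (biases excluded), matching the Qwen2.5-0.5B dimensions in the config printed at the top of this log. A minimal arithmetic sketch under that assumption, not part of the captured output:

# Editor's sketch: verify each ".scores" tensor holds one mask score per weight
# of its masked linear layer (biases appear to be unmasked).
hidden, kv_dim, inter = 896, 128, 4864  # Qwen2.5-0.5B projection dims

assert hidden * hidden == 802816   # q_proj / o_proj scores
assert hidden * kv_dim == 114688   # k_proj / v_proj scores
assert hidden * inter == 4358144   # gate_proj / up_proj / down_proj scores

# mlp2x_gelu connector: Linear(1152 -> 896), GELU, Linear(896 -> 896)
assert 1152 * 896 == 1032192       # connector._connector.0.scores
assert 896 * 896 == 802816         # connector._connector.2.scores

# Total score parameters, assuming all 24 decoder layers are masked
# (consistent with masked_layers: "all" in the config) plus the connector:
per_layer = 2 * hidden * hidden + 2 * hidden * kv_dim + 3 * hidden * inter
print(24 * per_layer + 1032192 + 802816)  # 359661568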
+[... NCCL INFO topology setup elided: each rank r reports Trees [0]-[23] as (r+1)/-1/-1->r->(r-1), a single chain 0->1->...->7 on all 24 channels; rank 0 announces Channel 01/24 through Channel 23/24, all with ring order 0 1 2 3 4 5 6 7; every rank logs "P2P Chunksize set to 524288" ...]
+[... NCCL INFO per-channel connection logs elided: each rank connects Channels 00-23 to its ring/tree neighbors in both directions via P2P/CUMEM/read, then reports "Connected all rings" and "Connected all trees" ...]
+ywang29-vrdb-test1-worker-0:1007641:1014141 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1007641:1014141 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[... identical "threadThresholds" and channel-count lines from ranks 1-7 elided ...]
+ywang29-vrdb-test1-worker-0:1007644:1014145 [3] NCCL INFO ncclCommInitRank comm 0x7f44f806b650 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007641:1014141 [0] NCCL INFO ncclCommInitRank comm 0x7f8e2c06b2b0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007648:1014148 [7] NCCL INFO ncclCommInitRank comm 0x7f2fe4069ff0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007643:1014146 [2] NCCL INFO ncclCommInitRank comm 0x7fd7f406a930 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007642:1014142 [1] NCCL INFO ncclCommInitRank comm 0x7f0b8406b250 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007647:1014143 [6] NCCL INFO ncclCommInitRank comm 0x7fbe1406b630 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007646:1014144 [5] NCCL INFO ncclCommInitRank comm 0x7f620c06b420 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xb25c41de0a99b66c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1007645:1014147 [4] NCCL INFO ncclCommInitRank comm 0x7f11f006b360 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb25c41de0a99b66c - Init COMPLETE
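Editor's note: the eight "Init COMPLETE" lines above mark the end of NCCL communicator setup for the 8 local GPUs. A minimal, hypothetical sketch of the kind of initialization that emits such output when NCCL_DEBUG=INFO is set; this is illustrative only, not the training entrypoint of this run:

# Editor's sketch. Launch with:
#   NCCL_DEBUG=INFO torchrun --nproc_per_node=8 this_script.py
import os
import torch
import torch.distributed as dist

def main():
    local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun
    torch.cuda.set_device(local_rank)
    # One NCCL communicator per process group; with NCCL_DEBUG=INFO, NCCL
    # prints the ring/tree topology and per-channel P2P connections seen above.
    dist.init_process_group(backend="nccl")
    x = torch.ones(1, device=f"cuda:{local_rank}")
    dist.all_reduce(x)  # the first collective exercises the communicator
    print(f"rank {dist.get_rank()}/{dist.get_world_size()}: {x.item()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    main()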
+ 0%| | 1/520 [00:30<4:26:12, 30.78s/it] {'loss': 2.1187, 'grad_norm': 0.01947706094254192, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 2/520 [00:34<2:08:26, 14.88s/it] {'loss': 2.1121, 'grad_norm': 0.02084826072768415, 'learning_rate': 0.025, 'epoch': 0.0}
+ 1%| | 3/520 [00:38<1:24:09, 9.77s/it] {'loss': 2.2626, 'grad_norm': 0.023839414469728374, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 4/520 [00:41<1:03:16, 7.36s/it] {'loss': 1.6583, 'grad_norm': 0.006514892286095153, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 5/520 [00:45<51:39, 6.02s/it] {'loss': 1.6744, 'grad_norm': 0.004709057468720471, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 6/520 [00:49<44:41, 5.22s/it] {'loss': 1.4193, 'grad_norm': 0.0031920325324718505, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:52<40:13, 4.70s/it] {'loss': 1.4438, 'grad_norm': 0.003937194549662948, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:57<39:17, 4.60s/it] {'loss': 1.4795, 'grad_norm': 0.0030842236399533885, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 9/520 [01:01<38:01, 4.47s/it] {'loss': 1.543, 'grad_norm': 0.00223760320617878, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [01:05<35:52, 4.22s/it] {'loss': 1.3689, 'grad_norm': 0.002368208368884696, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 11/520 [01:08<34:38, 4.08s/it] {'loss': 1.4505, 'grad_norm': 0.00290422455344628, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 12/520 [01:12<33:50, 4.00s/it] {'loss': 1.3565, 'grad_norm': 0.0025938572092562046, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [01:12<33:50, 4.00s/it][2025-10-13 19:49:45,977] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:17<35:05, 4.15s/it] {'loss': 1.3882, 'grad_norm': 0.0022124403212157046, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:20<33:50, 4.01s/it] {'loss': 1.4272, 'grad_norm': 0.0018002776270173554, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:24<32:52, 3.91s/it] {'loss': 1.4043, 'grad_norm': 0.0018690019033788994, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:28<32:08, 3.83s/it] {'loss': 1.3806, 'grad_norm': 0.0020215211235420183, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:31<31:31, 3.76s/it] {'loss': 1.4608, 'grad_norm': 0.0015622041114451246, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:35<31:07, 3.72s/it] {'loss': 1.3197, 'grad_norm': 0.0016333875133176228, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:38<30:50, 3.69s/it] {'loss': 1.3627, 'grad_norm': 0.0016856063186146675, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:42<30:39, 3.68s/it] {'loss': 1.3136, 'grad_norm': 0.0021533272489630472, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:46<30:35, 3.68s/it] {'loss': 1.3583, 'grad_norm': 0.0020878822531098422, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:49<30:26, 3.67s/it] {'loss': 1.4591, 'grad_norm': 0.0016996328948415181, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:53<30:15, 3.65s/it] {'loss': 1.3982, 'grad_norm': 0.0014727287033650812, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:57<30:14, 3.66s/it] {'loss': 1.3424, 'grad_norm': 0.0014945583491303112, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 25/520 [02:00<30:06, 3.65s/it] {'loss': 1.3966, 'grad_norm': 0.0016293552053882972, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▌ | 26/520 [02:04<30:17, 3.68s/it] {'loss': 1.3706, 'grad_norm': 0.0014744340558708777, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 27/520 [02:08<30:16, 3.68s/it] {'loss': 1.2932, 'grad_norm': 0.0015144152516035124, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 28/520 [02:12<30:14, 3.69s/it] {'loss': 1.3061, 'grad_norm': 0.0015197529244681174, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 6%|▌ | 29/520 [02:15<30:07, 3.68s/it] {'loss': 1.3285, 'grad_norm': 0.0014613327810920544, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:19<29:54, 3.66s/it] {'loss': 1.4104, 'grad_norm': 0.001273654162644369, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:22<29:48, 3.66s/it] {'loss': 1.3029, 'grad_norm': 0.001311366838517608, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
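Editor's note: the stage3.py warning above recommends synchronized allocator-cache flushes across ranks. A minimal sketch of that mitigation; engine, train_loader, the loss access, and the flush interval are hypothetical stand-ins, not taken from this codebase:

# Editor's sketch of the mitigation suggested by the stage3.py warning.
from deepspeed.accelerator import get_accelerator

for step, batch in enumerate(train_loader, start=1):
    loss = engine(batch).loss
    engine.backward(loss)
    engine.step()
    if step % 10 == 0:
        # Flush the CUDA allocator cache at the same point on every rank, so
        # no rank stalls inside a collective while another rank is flushing.
        get_accelerator().empty_cache()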
+ 6%|▌ | 32/520 [02:26<29:46, 3.66s/it] {'loss': 1.2566, 'grad_norm': 0.0013929381483159585, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:30<29:37, 3.65s/it] {'loss': 1.3046, 'grad_norm': 0.0013943699432069606, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 7%|▋ | 34/520 [02:33<29:37, 3.66s/it] {'loss': 1.3028, 'grad_norm': 0.0014940334681355705, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:37<29:34, 3.66s/it] {'loss': 1.3015, 'grad_norm': 0.0015145921929381823, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:41<29:43, 3.68s/it] {'loss': 1.4002, 'grad_norm': 0.00144843194289934, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:45<29:49, 3.70s/it] {'loss': 1.3889, 'grad_norm': 0.0013414246583184674, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:48<29:42, 3.70s/it] {'loss': 1.4761, 'grad_norm': 0.001352546062713942, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:52<29:40, 3.70s/it] {'loss': 1.3327, 'grad_norm': 0.0016522673673015984, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 40/520 [02:56<29:55, 3.74s/it] {'loss': 1.3622, 'grad_norm': 0.0013712591813180544, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 41/520 [03:00<30:07, 3.77s/it] {'loss': 1.3397, 'grad_norm': 0.00147702928966703, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 42/520 [03:03<30:16, 3.80s/it] {'loss': 1.3455, 'grad_norm': 0.0017870027824524032, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 43/520 [03:07<30:08, 3.79s/it] {'loss': 1.2893, 'grad_norm': 0.0012486078077363998, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 44/520 [03:11<29:41, 3.74s/it] {'loss': 1.3823, 'grad_norm': 0.0013820353589598705, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 9%|▊ | 45/520 [03:15<29:36, 3.74s/it] {'loss': 1.3532, 'grad_norm': 0.0014153439840669324, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:18<29:17, 3.71s/it] {'loss': 1.4362, 'grad_norm': 0.001290027436503083, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:22<29:02, 3.68s/it] {'loss': 1.3433, 'grad_norm': 0.0015566841995146212, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:26<28:55, 3.68s/it] {'loss': 1.3231, 'grad_norm': 0.0015568965741449927, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:29<28:49, 3.67s/it] {'loss': 1.3711, 'grad_norm': 0.0013998568621636598, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 10%|▉ | 50/520 [03:33<28:47, 3.68s/it] {'loss': 1.3614, 'grad_norm': 0.0013090147323864274, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
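Editor's note: the logged learning_rate values are consistent with linear warmup to the 2e-1 peak from the run name over the first 16 steps, then cosine decay to 0 across the 520 total steps (warmup length inferred from the values themselves, not from a config in this log). A sketch that reproduces them:

# Editor's sketch of the schedule the logged values follow (inferred).
import math

def lr_at(step, peak=0.2, warmup=16, total=520):
    if step < warmup:
        return peak * (step / warmup)          # linear warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

print(lr_at(12))  # log, step 12: 0.15000000000000002
print(lr_at(17))  # log, step 17: 0.1999980572931538
print(lr_at(44))  # log, step 44: 0.19848077530122082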
50/520 [03:33<28:47, 3.68s/it] 10%|▉ | 51/520 [03:37<29:04, 3.72s/it] {'loss': 1.2943, 'grad_norm': 0.001538226293483408, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:37<29:04, 3.72s/it] 10%|█ | 52/520 [03:41<29:16, 3.75s/it] {'loss': 1.4194, 'grad_norm': 0.0015948861414821322, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:41<29:16, 3.75s/it] 10%|█ | 53/520 [03:44<29:02, 3.73s/it] {'loss': 1.4037, 'grad_norm': 0.0015320478116554802, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:44<29:02, 3.73s/it] 10%|█ | 54/520 [03:48<28:48, 3.71s/it] {'loss': 1.3256, 'grad_norm': 0.0013287558680408831, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:48<28:48, 3.71s/it] 11%|█ | 55/520 [03:52<28:33, 3.69s/it] {'loss': 1.3023, 'grad_norm': 0.001577607981836128, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:52<28:33, 3.69s/it] 11%|█ | 56/520 [03:55<28:27, 3.68s/it] {'loss': 1.4261, 'grad_norm': 0.0015186410464386008, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:55<28:27, 3.68s/it] 11%|█ | 57/520 [03:59<28:14, 3.66s/it] {'loss': 1.2828, 'grad_norm': 0.0016018210060482455, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:59<28:14, 3.66s/it] 11%|█ | 58/520 [04:02<28:09, 3.66s/it] {'loss': 1.4486, 'grad_norm': 0.001369691276611739, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [04:02<28:09, 3.66s/it] 11%|█▏ | 59/520 [04:06<28:07, 3.66s/it] {'loss': 1.2615, 'grad_norm': 0.001318206898513833, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [04:06<28:07, 3.66s/it] 12%|█▏ | 60/520 [04:10<27:59, 3.65s/it] {'loss': 1.3537, 'grad_norm': 0.0013011362007662015, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [04:10<27:59, 3.65s/it] 12%|█▏ | 61/520 [04:13<27:54, 3.65s/it] {'loss': 1.3446, 'grad_norm': 0.0014160022702709317, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:13<27:54, 3.65s/it] 12%|█▏ | 62/520 [04:17<27:55, 3.66s/it] {'loss': 1.3355, 'grad_norm': 0.00147348443304698, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:17<27:55, 3.66s/it] 12%|█▏ | 63/520 [04:21<27:53, 3.66s/it] {'loss': 1.321, 'grad_norm': 0.001240958749937683, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:21<27:53, 3.66s/it] 12%|█▏ | 64/520 [04:24<27:48, 3.66s/it] {'loss': 1.3515, 'grad_norm': 0.00137304175268864, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:24<27:48, 3.66s/it] 12%|█▎ | 65/520 [04:28<27:42, 3.65s/it] {'loss': 1.3586, 'grad_norm': 0.001663287534889224, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:28<27:42, 3.65s/it] 13%|█▎ | 66/520 [04:32<27:37, 3.65s/it] {'loss': 1.3126, 'grad_norm': 0.0013005564848787273, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:32<27:37, 3.65s/it] 13%|█▎ | 67/520 [04:35<27:33, 3.65s/it] {'loss': 1.22, 'grad_norm': 0.0013724696052085858, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:35<27:33, 3.65s/it] 13%|█▎ | 68/520 [04:39<27:25, 3.64s/it] {'loss': 1.2833, 'grad_norm': 0.0014223610746461964, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:39<27:25, 3.64s/it] 13%|█▎ | 69/520 [04:43<27:18, 3.63s/it] {'loss': 1.2678, 'grad_norm': 0.0013867556131702645, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:43<27:18, 
3.63s/it] 13%|█▎ | 70/520 [04:46<27:08, 3.62s/it] {'loss': 1.2947, 'grad_norm': 0.0015203491055582501, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:46<27:08, 3.62s/it] 14%|█▎ | 71/520 [04:50<27:04, 3.62s/it] {'loss': 1.238, 'grad_norm': 0.001232151637333944, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:50<27:04, 3.62s/it] 14%|█▍ | 72/520 [04:53<27:00, 3.62s/it] {'loss': 1.3836, 'grad_norm': 0.0014988814585204856, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:53<27:00, 3.62s/it] 14%|█▍ | 73/520 [04:57<26:58, 3.62s/it] {'loss': 1.2157, 'grad_norm': 0.0013389801008561572, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:57<26:58, 3.62s/it] 14%|█▍ | 74/520 [05:01<26:55, 3.62s/it] {'loss': 1.327, 'grad_norm': 0.0014042317741596218, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [05:01<26:55, 3.62s/it] 14%|█▍ | 75/520 [05:04<26:51, 3.62s/it] {'loss': 1.235, 'grad_norm': 0.0012362376195963875, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [05:04<26:51, 3.62s/it] 15%|█▍ | 76/520 [05:08<26:46, 3.62s/it] {'loss': 1.389, 'grad_norm': 0.001323075703580375, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [05:08<26:46, 3.62s/it] 15%|█▍ | 77/520 [05:11<26:41, 3.62s/it] {'loss': 1.1535, 'grad_norm': 0.0013696433625836394, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:11<26:41, 3.62s/it] 15%|█▌ | 78/520 [05:15<26:40, 3.62s/it] {'loss': 1.2742, 'grad_norm': 0.0014344486433211243, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:15<26:40, 3.62s/it] 15%|█▌ | 79/520 [05:19<26:36, 3.62s/it] {'loss': 1.2627, 'grad_norm': 0.0013595489988714588, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:19<26:36, 3.62s/it] 15%|█▌ | 80/520 [05:22<26:37, 3.63s/it] {'loss': 1.376, 'grad_norm': 0.0013253266353793404, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:22<26:37, 3.63s/it] 16%|█▌ | 81/520 [05:26<26:35, 3.63s/it] {'loss': 1.4041, 'grad_norm': 0.0019467955844949046, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:26<26:35, 3.63s/it] 16%|█▌ | 82/520 [05:30<26:47, 3.67s/it] {'loss': 1.3315, 'grad_norm': 0.001290219365454795, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:30<26:47, 3.67s/it] 16%|█▌ | 83/520 [05:34<26:55, 3.70s/it] {'loss': 1.3453, 'grad_norm': 0.0013315385791023069, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:34<26:55, 3.70s/it] 16%|█▌ | 84/520 [05:37<27:00, 3.72s/it] {'loss': 1.3567, 'grad_norm': 0.0013352045846279552, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:37<27:00, 3.72s/it] 16%|█▋ | 85/520 [05:41<27:04, 3.73s/it] {'loss': 1.3963, 'grad_norm': 0.0012684785453014435, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:41<27:04, 3.73s/it] 17%|█▋ | 86/520 [05:45<27:03, 3.74s/it] {'loss': 1.3882, 'grad_norm': 0.0013018188867101495, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:45<27:03, 3.74s/it] 17%|█▋ | 87/520 [05:49<27:05, 3.75s/it] {'loss': 1.3182, 'grad_norm': 0.0012468126107947931, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:49<27:05, 3.75s/it] 17%|█▋ | 88/520 [05:52<27:04, 3.76s/it] {'loss': 1.266, 'grad_norm': 0.0010240890396236546, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 
[05:52<27:04, 3.76s/it] 17%|█▋ | 89/520 [05:56<27:03, 3.77s/it] {'loss': 1.3461, 'grad_norm': 0.0013993590639710684, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:56<27:03, 3.77s/it] 17%|█▋ | 90/520 [06:00<27:01, 3.77s/it] {'loss': 1.278, 'grad_norm': 0.0012430640073446612, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [06:00<27:01, 3.77s/it] 18%|█▊ | 91/520 [06:04<27:02, 3.78s/it] {'loss': 1.3541, 'grad_norm': 0.0011971796076288177, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [06:04<27:02, 3.78s/it] 18%|█▊ | 92/520 [06:07<26:49, 3.76s/it] {'loss': 1.2855, 'grad_norm': 0.0013041901622509234, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [06:07<26:49, 3.76s/it] 18%|█▊ | 93/520 [06:11<26:36, 3.74s/it] {'loss': 1.3045, 'grad_norm': 0.00147233397173856, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:11<26:36, 3.74s/it] 18%|█▊ | 94/520 [06:15<26:17, 3.70s/it] {'loss': 1.3814, 'grad_norm': 0.0013121627635911768, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:15<26:17, 3.70s/it] 18%|█▊ | 95/520 [06:18<26:06, 3.69s/it] {'loss': 1.2856, 'grad_norm': 0.0016357306894759934, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:18<26:06, 3.69s/it] 18%|█▊ | 96/520 [06:22<25:57, 3.67s/it] {'loss': 1.2943, 'grad_norm': 0.0010722494008086796, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:22<25:57, 3.67s/it] 19%|█▊ | 97/520 [06:26<25:53, 3.67s/it] {'loss': 1.2598, 'grad_norm': 0.0015514446343106486, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:26<25:53, 3.67s/it] 19%|█▉ | 98/520 [06:29<25:46, 3.67s/it] {'loss': 1.2662, 'grad_norm': 0.0012075704877320607, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:29<25:46, 3.67s/it] 19%|█▉ | 99/520 [06:33<25:45, 3.67s/it] {'loss': 1.2776, 'grad_norm': 0.001444500721077481, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:33<25:45, 3.67s/it] 19%|█▉ | 100/520 [06:37<25:40, 3.67s/it] {'loss': 1.2575, 'grad_norm': 0.00123850687074125, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:37<25:40, 3.67s/it] 19%|█▉ | 101/520 [06:40<25:37, 3.67s/it] {'loss': 1.2826, 'grad_norm': 0.001251689696880125, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:40<25:37, 3.67s/it] 20%|█▉ | 102/520 [06:44<25:32, 3.67s/it] {'loss': 1.2946, 'grad_norm': 0.0015063325661249538, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:44<25:32, 3.67s/it] 20%|█▉ | 103/520 [06:48<25:28, 3.67s/it] {'loss': 1.2151, 'grad_norm': 0.0011930368989355067, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:48<25:28, 3.67s/it] 20%|██ | 104/520 [06:51<25:25, 3.67s/it] {'loss': 1.2889, 'grad_norm': 0.0012457406979608046, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:51<25:25, 3.67s/it] 20%|██ | 105/520 [06:55<25:20, 3.66s/it] {'loss': 1.2815, 'grad_norm': 0.001160394363685437, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:55<25:20, 3.66s/it] 20%|██ | 106/520 [06:59<25:16, 3.66s/it] {'loss': 1.289, 'grad_norm': 0.0011943912567381797, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:59<25:16, 3.66s/it] 21%|██ | 107/520 [07:02<25:13, 3.66s/it] {'loss': 1.2651, 'grad_norm': 0.0011884296362175912, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 
21%|██ | 107/520 [07:02<25:13, 3.66s/it] 21%|██ | 108/520 [07:06<25:16, 3.68s/it] {'loss': 1.2458, 'grad_norm': 0.0012820119294465761, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [07:06<25:16, 3.68s/it] 21%|██ | 109/520 [07:10<25:08, 3.67s/it] {'loss': 1.2396, 'grad_norm': 0.0010580170425414446, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:10<25:08, 3.67s/it] 21%|██ | 110/520 [07:13<25:07, 3.68s/it] {'loss': 1.4259, 'grad_norm': 0.0013203520401722083, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:13<25:07, 3.68s/it] 21%|██▏ | 111/520 [07:17<25:01, 3.67s/it] {'loss': 1.4236, 'grad_norm': 0.0013280007095181248, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:17<25:01, 3.67s/it] 22%|██▏ | 112/520 [07:21<24:53, 3.66s/it] {'loss': 1.3072, 'grad_norm': 0.0011798575796633235, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:21<24:53, 3.66s/it] 22%|██▏ | 113/520 [07:24<24:48, 3.66s/it] {'loss': 1.2007, 'grad_norm': 0.0011936959430316806, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:24<24:48, 3.66s/it] 22%|██▏ | 114/520 [07:28<24:42, 3.65s/it] {'loss': 1.2907, 'grad_norm': 0.0011649624540695418, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:28<24:42, 3.65s/it] 22%|██▏ | 115/520 [07:32<24:43, 3.66s/it] {'loss': 1.3963, 'grad_norm': 0.0011360568512207218, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:32<24:43, 3.66s/it] 22%|██▏ | 116/520 [07:36<25:01, 3.72s/it] {'loss': 1.4039, 'grad_norm': 0.0011559067066875303, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:36<25:01, 3.72s/it] 22%|██▎ | 117/520 [07:39<25:07, 3.74s/it] {'loss': 1.3735, 'grad_norm': 0.0012750808195582013, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:39<25:07, 3.74s/it] 23%|██▎ | 118/520 [07:43<25:06, 3.75s/it] {'loss': 1.2859, 'grad_norm': 0.0011616717000678828, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:43<25:06, 3.75s/it] 23%|██▎ | 119/520 [07:47<25:04, 3.75s/it] {'loss': 1.2437, 'grad_norm': 0.0012889135630059926, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:47<25:04, 3.75s/it] 23%|██▎ | 120/520 [07:51<25:00, 3.75s/it] {'loss': 1.2527, 'grad_norm': 0.001841200488026842, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:51<25:00, 3.75s/it] 23%|██▎ | 121/520 [07:54<25:01, 3.76s/it] {'loss': 1.3114, 'grad_norm': 0.0013775843173769124, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:54<25:01, 3.76s/it] 23%|██▎ | 122/520 [07:58<25:02, 3.77s/it] {'loss': 1.2138, 'grad_norm': 0.0011281646713998145, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:58<25:02, 3.77s/it] 24%|██▎ | 123/520 [08:02<24:53, 3.76s/it] {'loss': 1.3306, 'grad_norm': 0.0011806487923036843, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [08:02<24:53, 3.76s/it] 24%|██▍ | 124/520 [08:06<24:49, 3.76s/it] {'loss': 1.2749, 'grad_norm': 0.0012433348125480722, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [08:06<24:49, 3.76s/it] 24%|██▍ | 125/520 [08:09<24:46, 3.76s/it] {'loss': 1.2704, 'grad_norm': 0.0012613783000708953, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:09<24:46, 3.76s/it] 24%|██▍ | 126/520 [08:14<26:00, 3.96s/it] {'loss': 1.2581, 'grad_norm': 
0.0010581884521279833, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:14<26:00, 3.96s/it] 24%|██▍ | 127/520 [08:18<25:34, 3.90s/it] {'loss': 1.2512, 'grad_norm': 0.0014161929007144793, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:18<25:34, 3.90s/it] 25%|██▍ | 128/520 [08:21<25:16, 3.87s/it] {'loss': 1.294, 'grad_norm': 0.0013741566574625198, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:21<25:16, 3.87s/it] 25%|██▍ | 129/520 [08:25<25:05, 3.85s/it] {'loss': 1.2572, 'grad_norm': 0.0011705793247567637, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:25<25:05, 3.85s/it] 25%|██▌ | 130/520 [08:29<24:52, 3.83s/it] {'loss': 1.2828, 'grad_norm': 0.0010670466743542698, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:29<24:52, 3.83s/it] 25%|██▌ | 131/520 [08:33<24:42, 3.81s/it] {'loss': 1.2282, 'grad_norm': 0.0010495747224633636, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:33<24:42, 3.81s/it] 25%|██▌ | 132/520 [08:37<24:31, 3.79s/it] {'loss': 1.3336, 'grad_norm': 0.0012807842624240672, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:37<24:31, 3.79s/it] 26%|██▌ | 133/520 [08:40<24:23, 3.78s/it] {'loss': 1.2542, 'grad_norm': 0.0012953626939660701, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:40<24:23, 3.78s/it] 26%|██▌ | 134/520 [08:44<24:17, 3.78s/it] {'loss': 1.3314, 'grad_norm': 0.0012279515554436617, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:44<24:17, 3.78s/it] 26%|██▌ | 135/520 [08:48<24:11, 3.77s/it] {'loss': 1.3786, 'grad_norm': 0.001194451448092794, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:48<24:11, 3.77s/it] 26%|██▌ | 136/520 [08:52<24:06, 3.77s/it] {'loss': 1.3238, 'grad_norm': 0.001174233989934724, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:52<24:06, 3.77s/it] 26%|██▋ | 137/520 [08:55<24:00, 3.76s/it] {'loss': 1.2389, 'grad_norm': 0.0016309434445991619, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:55<24:00, 3.76s/it] 27%|██▋ | 138/520 [08:59<23:57, 3.76s/it] {'loss': 1.2523, 'grad_norm': 0.0010887552394314053, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:59<23:57, 3.76s/it] 27%|██▋ | 139/520 [09:03<24:11, 3.81s/it] {'loss': 1.1372, 'grad_norm': 0.0012810784650987175, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [09:03<24:11, 3.81s/it] 27%|██▋ | 140/520 [09:07<24:15, 3.83s/it] {'loss': 1.2773, 'grad_norm': 0.0011333873631249136, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:07<24:15, 3.83s/it] 27%|██▋ | 141/520 [09:11<24:18, 3.85s/it] {'loss': 1.3603, 'grad_norm': 0.0011377008869291917, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:11<24:18, 3.85s/it] 27%|██▋ | 142/520 [09:15<24:20, 3.86s/it] {'loss': 1.2856, 'grad_norm': 0.0010455195365584916, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:15<24:20, 3.86s/it] 28%|██▊ | 143/520 [09:19<24:20, 3.87s/it] {'loss': 1.2847, 'grad_norm': 0.0012728152978946769, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:19<24:20, 3.87s/it] 28%|██▊ | 144/520 [09:22<23:51, 3.81s/it] {'loss': 1.2517, 'grad_norm': 0.0012327095713272828, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 
[09:22<23:51, 3.81s/it] 28%|██▊ | 145/520 [09:26<23:27, 3.75s/it] {'loss': 1.184, 'grad_norm': 0.0010432335592970383, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:26<23:27, 3.75s/it] 28%|██▊ | 146/520 [09:30<23:11, 3.72s/it] {'loss': 1.3493, 'grad_norm': 0.0011324808052379724, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:30<23:11, 3.72s/it] 28%|██▊ | 147/520 [09:33<23:00, 3.70s/it] {'loss': 1.2313, 'grad_norm': 0.0012210690896111416, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:33<23:00, 3.70s/it] 28%|██▊ | 148/520 [09:37<22:49, 3.68s/it] {'loss': 1.2579, 'grad_norm': 0.0011516418867369576, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:37<22:49, 3.68s/it] 29%|██▊ | 149/520 [09:40<22:44, 3.68s/it] {'loss': 1.1956, 'grad_norm': 0.0011319939132579415, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:40<22:44, 3.68s/it] 29%|██▉ | 150/520 [09:44<22:34, 3.66s/it] {'loss': 1.4268, 'grad_norm': 0.0011742609958753213, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:44<22:34, 3.66s/it] 29%|██▉ | 151/520 [09:48<22:27, 3.65s/it] {'loss': 1.2442, 'grad_norm': 0.0011382963181335369, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:48<22:27, 3.65s/it] 29%|██▉ | 152/520 [09:51<22:22, 3.65s/it] {'loss': 1.2215, 'grad_norm': 0.001229720717732526, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:51<22:22, 3.65s/it] 29%|██▉ | 153/520 [09:55<22:15, 3.64s/it] {'loss': 1.2519, 'grad_norm': 0.0011491103501034463, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:55<22:15, 3.64s/it] 30%|██▉ | 154/520 [09:59<22:11, 3.64s/it] {'loss': 1.3332, 'grad_norm': 0.0011520088106828261, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:59<22:11, 3.64s/it] 30%|██▉ | 155/520 [10:02<22:06, 3.64s/it] {'loss': 1.2486, 'grad_norm': 0.0011697752009496207, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [10:02<22:06, 3.64s/it] 30%|███ | 156/520 [10:06<22:06, 3.64s/it] {'loss': 1.2722, 'grad_norm': 0.0013090200781045135, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [10:06<22:06, 3.64s/it] 30%|███ | 157/520 [10:10<22:01, 3.64s/it] {'loss': 1.3397, 'grad_norm': 0.0011548261108883329, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [10:10<22:01, 3.64s/it] 30%|███ | 158/520 [10:13<21:54, 3.63s/it] {'loss': 1.2539, 'grad_norm': 0.001304114499977513, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:13<21:54, 3.63s/it] 31%|███ | 159/520 [10:17<21:50, 3.63s/it] {'loss': 1.2982, 'grad_norm': 0.0011508203950648856, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:17<21:50, 3.63s/it] 31%|███ | 160/520 [10:20<21:45, 3.63s/it] {'loss': 1.3046, 'grad_norm': 0.0012412247383089632, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:20<21:45, 3.63s/it] 31%|███ | 161/520 [10:24<21:42, 3.63s/it] {'loss': 1.2856, 'grad_norm': 0.0012163067899876522, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:24<21:42, 3.63s/it] 31%|███ | 162/520 [10:28<21:40, 3.63s/it] {'loss': 1.2768, 'grad_norm': 0.0011211603198802746, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:28<21:40, 3.63s/it] 31%|███▏ | 163/520 [10:31<21:35, 3.63s/it] {'loss': 1.1769, 'grad_norm': 
0.0013871081319542823, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:31<21:35, 3.63s/it] 32%|███▏ | 164/520 [10:35<21:32, 3.63s/it] {'loss': 1.1431, 'grad_norm': 0.001138741261924277, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:35<21:32, 3.63s/it] 32%|███▏ | 165/520 [10:39<21:28, 3.63s/it] {'loss': 1.2872, 'grad_norm': 0.0010765916958514648, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:39<21:28, 3.63s/it] 32%|███▏ | 166/520 [10:42<21:28, 3.64s/it] {'loss': 1.2623, 'grad_norm': 0.001258350275017126, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:42<21:28, 3.64s/it] 32%|███▏ | 167/520 [10:46<21:43, 3.69s/it] {'loss': 1.2585, 'grad_norm': 0.001267644707257486, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:46<21:43, 3.69s/it] 32%|███▏ | 168/520 [10:50<21:59, 3.75s/it] {'loss': 1.2024, 'grad_norm': 0.0011328300145137576, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:50<21:59, 3.75s/it] 32%|███▎ | 169/520 [10:54<22:06, 3.78s/it] {'loss': 1.2759, 'grad_norm': 0.001177187369699117, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:54<22:06, 3.78s/it] 33%|███▎ | 170/520 [10:58<22:14, 3.81s/it] {'loss': 1.226, 'grad_norm': 0.00101019938840335, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:58<22:14, 3.81s/it] 33%|███▎ | 171/520 [11:02<22:22, 3.85s/it] {'loss': 1.2082, 'grad_norm': 0.0011921016156393196, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [11:02<22:22, 3.85s/it] 33%|███▎ | 172/520 [11:05<22:19, 3.85s/it] {'loss': 1.2839, 'grad_norm': 0.0011542989027107748, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:05<22:19, 3.85s/it] 33%|███▎ | 173/520 [11:09<22:19, 3.86s/it] {'loss': 1.2248, 'grad_norm': 0.001140931632892157, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:09<22:19, 3.86s/it] 33%|███▎ | 174/520 [11:13<22:19, 3.87s/it] {'loss': 1.2722, 'grad_norm': 0.0012215342913884145, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:13<22:19, 3.87s/it] 34%|███▎ | 175/520 [11:17<22:20, 3.88s/it] {'loss': 1.1881, 'grad_norm': 0.001046107931449179, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:17<22:20, 3.88s/it] 34%|███▍ | 176/520 [11:21<22:14, 3.88s/it] {'loss': 1.3094, 'grad_norm': 0.0011380578800239067, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:21<22:14, 3.88s/it] 34%|███▍ | 177/520 [11:25<22:12, 3.88s/it] {'loss': 1.1811, 'grad_norm': 0.0012038451811881656, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:25<22:12, 3.88s/it] 34%|███▍ | 178/520 [11:29<22:07, 3.88s/it] {'loss': 1.2564, 'grad_norm': 0.0012317634053660923, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:29<22:07, 3.88s/it] 34%|███▍ | 179/520 [11:33<22:04, 3.89s/it] {'loss': 1.3333, 'grad_norm': 0.0011139616747866486, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:33<22:04, 3.89s/it] 35%|███▍ | 180/520 [11:37<21:58, 3.88s/it] {'loss': 1.2541, 'grad_norm': 0.0011729447165797764, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:37<21:58, 3.88s/it] 35%|███▍ | 181/520 [11:40<21:55, 3.88s/it] {'loss': 1.2251, 'grad_norm': 0.0010785456347221568, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 
35%|███▍ | 181/520 [11:40<21:55, 3.88s/it] 35%|███▌ | 182/520 [11:44<21:54, 3.89s/it] {'loss': 1.2354, 'grad_norm': 0.0011324724587257699, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:44<21:54, 3.89s/it] 35%|███▌ | 183/520 [11:48<21:50, 3.89s/it] {'loss': 1.2608, 'grad_norm': 0.001089760999616876, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:48<21:50, 3.89s/it] 35%|███▌ | 184/520 [11:52<21:45, 3.89s/it] {'loss': 1.1978, 'grad_norm': 0.001177200513865714, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:52<21:45, 3.89s/it] 36%|███▌ | 185/520 [11:56<21:40, 3.88s/it] {'loss': 1.3378, 'grad_norm': 0.0011276481115232541, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:56<21:40, 3.88s/it] 36%|███▌ | 186/520 [12:00<21:35, 3.88s/it] {'loss': 1.2183, 'grad_norm': 0.0011654675252486567, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [12:00<21:35, 3.88s/it] 36%|███▌ | 187/520 [12:04<21:30, 3.87s/it] {'loss': 1.2158, 'grad_norm': 0.0012815440629649073, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:04<21:30, 3.87s/it] 36%|███▌ | 188/520 [12:08<21:21, 3.86s/it] {'loss': 1.3004, 'grad_norm': 0.001193759792046511, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:08<21:21, 3.86s/it] 36%|███▋ | 189/520 [12:11<21:17, 3.86s/it] {'loss': 1.3077, 'grad_norm': 0.0010777990982729353, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:11<21:17, 3.86s/it] 37%|███▋ | 190/520 [12:15<21:12, 3.86s/it] {'loss': 1.2304, 'grad_norm': 0.0012591888165404013, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:15<21:12, 3.86s/it] 37%|███▋ | 191/520 [12:19<21:11, 3.86s/it] {'loss': 1.1854, 'grad_norm': 0.001022836938015007, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:19<21:11, 3.86s/it] 37%|███▋ | 192/520 [12:23<21:09, 3.87s/it] {'loss': 1.2658, 'grad_norm': 0.0010764942505206562, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:23<21:09, 3.87s/it] 37%|███▋ | 193/520 [12:27<21:07, 3.88s/it] {'loss': 1.2274, 'grad_norm': 0.0012555954904840563, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:27<21:07, 3.88s/it] 37%|███▋ | 194/520 [12:31<21:02, 3.87s/it] {'loss': 1.1259, 'grad_norm': 0.0012149366195848145, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:31<21:02, 3.87s/it] 38%|███▊ | 195/520 [12:35<20:56, 3.87s/it] {'loss': 1.2859, 'grad_norm': 0.0011855174827048627, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:35<20:56, 3.87s/it] 38%|███▊ | 196/520 [12:38<20:49, 3.86s/it] {'loss': 1.2612, 'grad_norm': 0.0012951795577007773, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:38<20:49, 3.86s/it] 38%|███▊ | 197/520 [12:42<20:47, 3.86s/it] {'loss': 1.2102, 'grad_norm': 0.0011439700565479647, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:42<20:47, 3.86s/it] 38%|███▊ | 198/520 [12:46<20:42, 3.86s/it] {'loss': 1.2832, 'grad_norm': 0.0012231768416421408, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:46<20:42, 3.86s/it] 38%|███▊ | 199/520 [12:50<20:41, 3.87s/it] {'loss': 1.2019, 'grad_norm': 0.0011575155522197374, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:50<20:41, 3.87s/it] 38%|███▊ | 200/520 
[12:54<20:37, 3.87s/it] {'loss': 1.1782, 'grad_norm': 0.0011818258340936666, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:54<20:37, 3.87s/it] 39%|███▊ | 201/520 [12:58<20:30, 3.86s/it] {'loss': 1.2029, 'grad_norm': 0.0009980617030562774, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:58<20:30, 3.86s/it] 39%|███▉ | 202/520 [13:02<20:18, 3.83s/it] {'loss': 1.2006, 'grad_norm': 0.0011094080276127152, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:02<20:18, 3.83s/it] 39%|███▉ | 203/520 [13:05<19:59, 3.78s/it] {'loss': 1.255, 'grad_norm': 0.0011689266491747186, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:05<19:59, 3.78s/it] 39%|███▉ | 204/520 [13:09<19:43, 3.75s/it] {'loss': 1.268, 'grad_norm': 0.001237164037536932, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:09<19:43, 3.75s/it] 39%|███▉ | 205/520 [13:13<19:34, 3.73s/it] {'loss': 1.2018, 'grad_norm': 0.0011836411668752769, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:13<19:34, 3.73s/it] 40%|███▉ | 206/520 [13:16<19:24, 3.71s/it] {'loss': 1.3032, 'grad_norm': 0.0011286858306787204, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:16<19:24, 3.71s/it] 40%|███▉ | 207/520 [13:20<19:17, 3.70s/it] {'loss': 1.1765, 'grad_norm': 0.0010654983193252607, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:20<19:17, 3.70s/it] 40%|████ | 208/520 [13:24<19:10, 3.69s/it] {'loss': 1.2949, 'grad_norm': 0.0012449910045679593, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:24<19:10, 3.69s/it] 40%|████ | 209/520 [13:27<19:11, 3.70s/it] {'loss': 1.2047, 'grad_norm': 0.0010934829382018242, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:27<19:11, 3.70s/it] 40%|████ | 210/520 [13:31<19:02, 3.69s/it] {'loss': 1.2789, 'grad_norm': 0.0011884492710547718, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:31<19:02, 3.69s/it] 41%|████ | 211/520 [13:35<19:00, 3.69s/it] {'loss': 1.279, 'grad_norm': 0.0010847509611164121, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:35<19:00, 3.69s/it] 41%|████ | 212/520 [13:38<18:51, 3.67s/it] {'loss': 1.2718, 'grad_norm': 0.0011662068594780468, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:38<18:51, 3.67s/it] 41%|████ | 213/520 [13:42<18:48, 3.68s/it] {'loss': 1.2293, 'grad_norm': 0.0012745935758846727, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:42<18:48, 3.68s/it] 41%|████ | 214/520 [13:46<18:45, 3.68s/it] {'loss': 1.2148, 'grad_norm': 0.0011615384155117556, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:46<18:45, 3.68s/it] 41%|████▏ | 215/520 [13:49<18:53, 3.71s/it] {'loss': 1.1361, 'grad_norm': 0.0010856491598999888, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:49<18:53, 3.71s/it] 42%|████▏ | 216/520 [13:53<19:00, 3.75s/it] {'loss': 1.1368, 'grad_norm': 0.0010883789417413154, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:53<19:00, 3.75s/it] 42%|████▏ | 217/520 [13:57<19:05, 3.78s/it] {'loss': 1.2615, 'grad_norm': 0.0011608655629259444, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:57<19:05, 3.78s/it] 42%|████▏ | 218/520 [14:01<19:07, 3.80s/it] {'loss': 1.2456, 'grad_norm': 
0.0012054125199246664, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:01<19:07, 3.80s/it] 42%|████▏ | 219/520 [14:05<19:05, 3.81s/it] {'loss': 1.2481, 'grad_norm': 0.001026142833338766, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:05<19:05, 3.81s/it] 42%|████▏ | 220/520 [14:09<19:02, 3.81s/it] {'loss': 1.1819, 'grad_norm': 0.001080893001808523, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:09<19:02, 3.81s/it] 42%|████▎ | 221/520 [14:12<18:59, 3.81s/it] {'loss': 1.2513, 'grad_norm': 0.001136797558709087, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:12<18:59, 3.81s/it] 43%|████▎ | 222/520 [14:16<18:55, 3.81s/it] {'loss': 1.19, 'grad_norm': 0.001139673240037722, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:16<18:55, 3.81s/it] 43%|████▎ | 223/520 [14:20<18:51, 3.81s/it] {'loss': 1.1832, 'grad_norm': 0.0010728137087463062, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:20<18:51, 3.81s/it] 43%|████▎ | 224/520 [14:24<18:48, 3.81s/it] {'loss': 1.2448, 'grad_norm': 0.0010314914719711334, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:24<18:48, 3.81s/it] 43%|████▎ | 225/520 [14:28<18:45, 3.82s/it] {'loss': 1.1897, 'grad_norm': 0.0011081864388038385, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:28<18:45, 3.82s/it] 43%|████▎ | 226/520 [14:31<18:41, 3.81s/it] {'loss': 1.2915, 'grad_norm': 0.0010984246104331892, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:31<18:41, 3.81s/it] 44%|████▎ | 227/520 [14:35<18:39, 3.82s/it] {'loss': 1.2732, 'grad_norm': 0.0010655422524801638, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:35<18:39, 3.82s/it] 44%|████▍ | 228/520 [14:39<18:35, 3.82s/it] {'loss': 1.2883, 'grad_norm': 0.0011396994878744569, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:39<18:35, 3.82s/it] 44%|████▍ | 229/520 [14:43<18:32, 3.82s/it] {'loss': 1.249, 'grad_norm': 0.0010452802871787224, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:43<18:32, 3.82s/it] 44%|████▍ | 230/520 [14:47<18:29, 3.83s/it] {'loss': 1.1348, 'grad_norm': 0.0010872284396514784, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:47<18:29, 3.83s/it] 44%|████▍ | 231/520 [14:51<18:26, 3.83s/it] {'loss': 1.2028, 'grad_norm': 0.0010623569437381443, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:51<18:26, 3.83s/it] 45%|████▍ | 232/520 [14:54<18:23, 3.83s/it] {'loss': 1.3246, 'grad_norm': 0.0012826209801087291, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:54<18:23, 3.83s/it] 45%|████▍ | 233/520 [14:58<18:17, 3.82s/it] {'loss': 1.2183, 'grad_norm': 0.00121523988239124, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:58<18:17, 3.82s/it] 45%|████▌ | 234/520 [15:02<18:14, 3.83s/it] {'loss': 1.1555, 'grad_norm': 0.0014626170378732677, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [15:02<18:14, 3.83s/it] 45%|████▌ | 235/520 [15:06<18:15, 3.85s/it] {'loss': 1.2088, 'grad_norm': 0.001182586561240423, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:06<18:15, 3.85s/it] 45%|████▌ | 236/520 [15:10<18:08, 3.83s/it] {'loss': 1.2787, 'grad_norm': 0.0010405945646318395, 'learning_rate': 
0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:10<18:08, 3.83s/it] 46%|████▌ | 237/520 [15:14<18:01, 3.82s/it] {'loss': 1.2827, 'grad_norm': 0.0011346357995781656, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:14<18:01, 3.82s/it] 46%|████▌ | 238/520 [15:17<17:56, 3.82s/it] {'loss': 1.2133, 'grad_norm': 0.0011960556651565005, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:17<17:56, 3.82s/it] 46%|████▌ | 239/520 [15:21<17:53, 3.82s/it] {'loss': 1.287, 'grad_norm': 0.0011438725501648009, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:21<17:53, 3.82s/it] 46%|████▌ | 240/520 [15:25<17:51, 3.83s/it] {'loss': 1.0952, 'grad_norm': 0.0011139748845900694, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:25<17:51, 3.83s/it] 46%|████▋ | 241/520 [15:29<17:47, 3.83s/it] {'loss': 1.1882, 'grad_norm': 0.001085532136050541, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:29<17:47, 3.83s/it] 47%|████▋ | 242/520 [15:33<17:42, 3.82s/it] {'loss': 1.1978, 'grad_norm': 0.0010614268153410023, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:33<17:42, 3.82s/it] 47%|████▋ | 243/520 [15:37<17:39, 3.82s/it] {'loss': 1.1955, 'grad_norm': 0.0011177136267023046, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:37<17:39, 3.82s/it] 47%|████▋ | 244/520 [15:40<17:34, 3.82s/it] {'loss': 1.3011, 'grad_norm': 0.0011171545907450833, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:40<17:34, 3.82s/it] 47%|████▋ | 245/520 [15:44<17:33, 3.83s/it] {'loss': 1.1697, 'grad_norm': 0.0012294985280509763, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:44<17:33, 3.83s/it] 47%|████▋ | 246/520 [15:48<17:30, 3.83s/it] {'loss': 1.3062, 'grad_norm': 0.0011649770032979307, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:48<17:30, 3.83s/it] 48%|████▊ | 247/520 [15:52<17:24, 3.82s/it] {'loss': 1.3477, 'grad_norm': 0.0011594767551763782, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:52<17:24, 3.82s/it] 48%|████▊ | 248/520 [15:56<17:18, 3.82s/it] {'loss': 1.175, 'grad_norm': 0.001138939817422722, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:56<17:18, 3.82s/it] 48%|████▊ | 249/520 [15:59<17:13, 3.81s/it] {'loss': 1.2652, 'grad_norm': 0.0011331560710061122, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:59<17:13, 3.81s/it] 48%|████▊ | 250/520 [16:03<17:12, 3.82s/it] {'loss': 1.2044, 'grad_norm': 0.0011928824423494888, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:03<17:12, 3.82s/it] 48%|████▊ | 251/520 [16:07<17:07, 3.82s/it] {'loss': 1.2703, 'grad_norm': 0.0010925257507100662, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:07<17:07, 3.82s/it] 48%|████▊ | 252/520 [16:11<16:55, 3.79s/it] {'loss': 1.2155, 'grad_norm': 0.0010844849421759652, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:11<16:55, 3.79s/it] 49%|████▊ | 253/520 [16:15<16:42, 3.76s/it] {'loss': 1.2652, 'grad_norm': 0.0012797111921722996, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:15<16:42, 3.76s/it] 49%|████▉ | 254/520 [16:18<16:32, 3.73s/it] {'loss': 1.2035, 'grad_norm': 0.001059588768709784, 'learning_rate': 0.10871557427476584, 'epoch': 
0.49} + 49%|████▉ | 254/520 [16:18<16:32, 3.73s/it] 49%|████▉ | 255/520 [16:22<16:22, 3.71s/it] {'loss': 1.2035, 'grad_norm': 0.0012713660934044356, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:22<16:22, 3.71s/it] 49%|████▉ | 256/520 [16:26<16:15, 3.70s/it] {'loss': 1.2566, 'grad_norm': 0.001204046181964168, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:26<16:15, 3.70s/it] 49%|████▉ | 257/520 [16:29<16:08, 3.68s/it] {'loss': 1.2357, 'grad_norm': 0.0011506715874194063, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:29<16:08, 3.68s/it] 50%|████▉ | 258/520 [16:33<16:04, 3.68s/it] {'loss': 1.2434, 'grad_norm': 0.001056628427652116, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:33<16:04, 3.68s/it] 50%|████▉ | 259/520 [16:36<15:58, 3.67s/it] {'loss': 1.3212, 'grad_norm': 0.0013203423580223426, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:37<15:58, 3.67s/it] 50%|█████ | 260/520 [16:40<15:55, 3.68s/it] {'loss': 1.257, 'grad_norm': 0.0011396983814964006, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:40<15:55, 3.68s/it] 50%|█████ | 261/520 [16:44<15:50, 3.67s/it] {'loss': 1.2007, 'grad_norm': 0.0011646383443547003, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:44<15:50, 3.67s/it] 50%|█████ | 262/520 [16:48<15:48, 3.68s/it] {'loss': 1.1774, 'grad_norm': 0.0011725236894910417, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:48<15:48, 3.68s/it] 51%|█████ | 263/520 [16:51<15:41, 3.66s/it] {'loss': 1.2222, 'grad_norm': 0.001083613486355995, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:51<15:41, 3.66s/it] 51%|█████ | 264/520 [16:55<15:37, 3.66s/it] {'loss': 1.2707, 'grad_norm': 0.00107828178618059, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:55<15:37, 3.66s/it] 51%|█████ | 265/520 [16:58<15:34, 3.66s/it] {'loss': 1.1857, 'grad_norm': 0.001186269604490615, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:58<15:34, 3.66s/it] 51%|█████ | 266/520 [17:02<15:29, 3.66s/it] {'loss': 1.064, 'grad_norm': 0.0010760330394659224, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [17:02<15:29, 3.66s/it] 51%|█████▏ | 267/520 [17:06<15:27, 3.66s/it] {'loss': 1.1828, 'grad_norm': 0.0011355967148253758, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:06<15:27, 3.66s/it] 52%|█████▏ | 268/520 [17:09<15:22, 3.66s/it] {'loss': 1.3267, 'grad_norm': 0.0015099123665551545, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:09<15:22, 3.66s/it] 52%|█████▏ | 269/520 [17:13<15:18, 3.66s/it] {'loss': 1.2856, 'grad_norm': 0.0011501879941181452, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:13<15:18, 3.66s/it] 52%|█████▏ | 270/520 [17:17<15:18, 3.67s/it] {'loss': 1.1516, 'grad_norm': 0.0010454641954671124, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:17<15:18, 3.67s/it] 52%|█████▏ | 271/520 [17:20<15:14, 3.67s/it] {'loss': 1.2714, 'grad_norm': 0.0011366281242829377, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:20<15:14, 3.67s/it] 52%|█████▏ | 272/520 [17:24<15:09, 3.67s/it] {'loss': 1.1715, 'grad_norm': 0.001126489195319799, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:24<15:09, 
3.67s/it] 52%|█████▎ | 273/520 [17:28<15:12, 3.69s/it] {'loss': 1.2781, 'grad_norm': 0.0010845436949624644, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:28<15:12, 3.69s/it] 53%|█████▎ | 274/520 [17:32<15:07, 3.69s/it] {'loss': 1.2507, 'grad_norm': 0.0013137207387536472, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:32<15:07, 3.69s/it] 53%|█████▎ | 275/520 [17:35<15:04, 3.69s/it] {'loss': 1.1935, 'grad_norm': 0.001297543074426674, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:35<15:04, 3.69s/it] 53%|█████▎ | 276/520 [17:39<14:58, 3.68s/it] {'loss': 1.2596, 'grad_norm': 0.0013591447634274667, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:39<14:58, 3.68s/it] 53%|█████▎ | 277/520 [17:43<14:55, 3.68s/it] {'loss': 1.2883, 'grad_norm': 0.0010492760512643916, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:43<14:55, 3.68s/it] 53%|█████▎ | 278/520 [17:46<14:51, 3.68s/it] {'loss': 1.1459, 'grad_norm': 0.0010041515224843128, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:46<14:51, 3.68s/it] 54%|█████▎ | 279/520 [17:50<14:47, 3.68s/it] {'loss': 1.1606, 'grad_norm': 0.0011386756284107423, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:50<14:47, 3.68s/it] 54%|█████▍ | 280/520 [17:54<14:43, 3.68s/it] {'loss': 1.1903, 'grad_norm': 0.0014025404579049924, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:54<14:43, 3.68s/it] 54%|█████▍ | 281/520 [17:57<14:39, 3.68s/it] {'loss': 1.2913, 'grad_norm': 0.00119251020470441, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:57<14:39, 3.68s/it] 54%|█████▍ | 282/520 [18:01<14:35, 3.68s/it] {'loss': 1.16, 'grad_norm': 0.0010429086136113828, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:01<14:35, 3.68s/it] 54%|█████▍ | 283/520 [18:05<14:34, 3.69s/it] {'loss': 1.3046, 'grad_norm': 0.0012046427905683073, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:05<14:34, 3.69s/it] 55%|█████▍ | 284/520 [18:08<14:30, 3.69s/it] {'loss': 1.173, 'grad_norm': 0.0013100640387133807, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:08<14:30, 3.69s/it] 55%|█████▍ | 285/520 [18:12<14:24, 3.68s/it] {'loss': 1.1831, 'grad_norm': 0.001100788706024027, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:12<14:24, 3.68s/it] 55%|█████▌ | 286/520 [18:16<14:20, 3.68s/it] {'loss': 1.0661, 'grad_norm': 0.0011843172091966683, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:16<14:20, 3.68s/it] 55%|█████▌ | 287/520 [18:19<14:17, 3.68s/it] {'loss': 1.291, 'grad_norm': 0.0011465263732418195, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:19<14:17, 3.68s/it] 55%|█████▌ | 288/520 [18:23<14:13, 3.68s/it] {'loss': 1.3224, 'grad_norm': 0.0010732203055859489, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:23<14:13, 3.68s/it] 56%|█████▌ | 289/520 [18:27<14:10, 3.68s/it] {'loss': 1.1983, 'grad_norm': 0.0010423105946389894, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:27<14:10, 3.68s/it] 56%|█████▌ | 290/520 [18:31<14:07, 3.68s/it] {'loss': 1.1249, 'grad_norm': 0.0010449178724785107, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:31<14:07, 
3.68s/it] 56%|█████▌ | 291/520 [18:34<14:02, 3.68s/it] {'loss': 1.1684, 'grad_norm': 0.0010707192229609031, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:34<14:02, 3.68s/it] 56%|█████▌ | 292/520 [18:38<13:58, 3.68s/it] {'loss': 1.2233, 'grad_norm': 0.0010938892990266064, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:38<13:58, 3.68s/it] 56%|█████▋ | 293/520 [18:42<13:53, 3.67s/it] {'loss': 1.1694, 'grad_norm': 0.0011823035529930001, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:42<13:53, 3.67s/it] 57%|█████▋ | 294/520 [18:45<13:51, 3.68s/it] {'loss': 1.1881, 'grad_norm': 0.0012238314345152572, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:45<13:51, 3.68s/it] 57%|█████▋ | 295/520 [18:49<13:46, 3.67s/it] {'loss': 1.2086, 'grad_norm': 0.0011151989834648282, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:49<13:46, 3.67s/it] 57%|█████▋ | 296/520 [18:53<13:40, 3.66s/it] {'loss': 1.1434, 'grad_norm': 0.0011825157765065614, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:53<13:40, 3.66s/it] 57%|█████▋ | 297/520 [18:56<13:39, 3.68s/it] {'loss': 1.2702, 'grad_norm': 0.0012037494599075652, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:56<13:39, 3.68s/it] 57%|█████▋ | 298/520 [19:00<13:35, 3.67s/it] {'loss': 1.2318, 'grad_norm': 0.001065796754466449, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:00<13:35, 3.67s/it] 57%|█████▊ | 299/520 [19:04<13:38, 3.70s/it] {'loss': 1.245, 'grad_norm': 0.001024328376357336, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:04<13:38, 3.70s/it] 58%|█████▊ | 300/520 [19:08<13:46, 3.76s/it] {'loss': 1.2818, 'grad_norm': 0.0011039482688726596, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:08<13:46, 3.76s/it] 58%|█████▊ | 301/520 [19:11<13:48, 3.78s/it] {'loss': 1.2588, 'grad_norm': 0.0010788646473130342, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:11<13:48, 3.78s/it] 58%|█████▊ | 302/520 [19:15<13:52, 3.82s/it] {'loss': 1.2589, 'grad_norm': 0.0012390691545222973, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:15<13:52, 3.82s/it] 58%|█████▊ | 303/520 [19:19<13:52, 3.84s/it] {'loss': 1.1887, 'grad_norm': 0.001296917468941754, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:19<13:52, 3.84s/it] 58%|█████▊ | 304/520 [19:23<13:52, 3.86s/it] {'loss': 1.1605, 'grad_norm': 0.001263710410770588, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:23<13:52, 3.86s/it] 59%|█████▊ | 305/520 [19:27<13:50, 3.86s/it] {'loss': 1.2886, 'grad_norm': 0.0012278680356734204, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:27<13:50, 3.86s/it] 59%|█████▉ | 306/520 [19:31<13:48, 3.87s/it] {'loss': 1.2381, 'grad_norm': 0.0011332844249166188, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:31<13:48, 3.87s/it] 59%|█████▉ | 307/520 [19:35<14:06, 3.97s/it] {'loss': 1.1725, 'grad_norm': 0.001076324711855823, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:35<14:06, 3.97s/it] 59%|█████▉ | 308/520 [19:39<13:55, 3.94s/it] {'loss': 1.2927, 'grad_norm': 0.0011829444636219575, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 
[19:39<13:55, 3.94s/it] 59%|█████▉ | 309/520 [19:43<13:45, 3.91s/it] {'loss': 1.1781, 'grad_norm': 0.0010675690767543672, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:43<13:45, 3.91s/it] 60%|█████▉ | 310/520 [19:47<13:38, 3.90s/it] {'loss': 1.158, 'grad_norm': 0.0011106060025516573, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:47<13:38, 3.90s/it] 60%|█████▉ | 311/520 [19:50<13:33, 3.89s/it] {'loss': 1.1306, 'grad_norm': 0.00114407795962898, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:50<13:33, 3.89s/it] 60%|██████ | 312/520 [19:54<13:27, 3.88s/it] {'loss': 1.1223, 'grad_norm': 0.0011883232653347715, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:54<13:27, 3.88s/it] 60%|██████ | 313/520 [19:58<13:24, 3.88s/it] {'loss': 1.112, 'grad_norm': 0.0009999038156341573, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:58<13:24, 3.88s/it] 60%|██████ | 314/520 [20:02<13:38, 3.97s/it] {'loss': 1.1465, 'grad_norm': 0.0010599671320706433, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [20:02<13:38, 3.97s/it] 61%|██████ | 315/520 [20:06<13:18, 3.90s/it] {'loss': 1.2073, 'grad_norm': 0.0013499574994160699, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [20:06<13:18, 3.90s/it] 61%|██████ | 316/520 [20:10<13:27, 3.96s/it] {'loss': 1.13, 'grad_norm': 0.0011755662113112867, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [20:10<13:27, 3.96s/it] 61%|██████ | 317/520 [20:14<13:12, 3.90s/it] {'loss': 1.1417, 'grad_norm': 0.0009975330304284834, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:14<13:12, 3.90s/it] 61%|██████ | 318/520 [20:18<13:06, 3.89s/it] {'loss': 1.2516, 'grad_norm': 0.0011936807453908, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:18<13:06, 3.89s/it] 61%|██████▏ | 319/520 [20:22<13:15, 3.96s/it] {'loss': 1.1294, 'grad_norm': 0.0010145298849875588, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:22<13:15, 3.96s/it] 62%|██████▏ | 320/520 [20:26<12:52, 3.86s/it] {'loss': 1.0802, 'grad_norm': 0.001088001844628093, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:26<12:52, 3.86s/it] 62%|██████▏ | 321/520 [20:29<12:37, 3.81s/it] {'loss': 1.2732, 'grad_norm': 0.0011627325546517322, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:29<12:37, 3.81s/it] 62%|██████▏ | 322/520 [20:33<12:23, 3.76s/it] {'loss': 1.1045, 'grad_norm': 0.0010455289507442982, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:33<12:23, 3.76s/it] 62%|██████▏ | 323/520 [20:37<12:14, 3.73s/it] {'loss': 1.1784, 'grad_norm': 0.001430024316793686, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:37<12:14, 3.73s/it] 62%|██████▏ | 324/520 [20:40<12:06, 3.71s/it] {'loss': 1.2077, 'grad_norm': 0.0010967801808927763, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:40<12:06, 3.71s/it] 62%|██████▎ | 325/520 [20:44<11:58, 3.69s/it] {'loss': 1.2109, 'grad_norm': 0.0011293520632440555, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:44<11:58, 3.69s/it] 63%|██████▎ | 326/520 [20:48<11:54, 3.68s/it] {'loss': 1.2098, 'grad_norm': 0.0011522569980297397, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 
326/520 [20:48<11:54, 3.68s/it] 63%|██████▎ | 327/520 [20:51<11:46, 3.66s/it] {'loss': 1.2145, 'grad_norm': 0.001198865263219176, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:51<11:46, 3.66s/it] 63%|██████▎ | 328/520 [20:55<11:40, 3.65s/it] {'loss': 1.2548, 'grad_norm': 0.0011296164451106208, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:55<11:40, 3.65s/it] 63%|██████▎ | 329/520 [20:58<11:37, 3.65s/it] {'loss': 1.1349, 'grad_norm': 0.0009592619849915409, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:58<11:37, 3.65s/it] 63%|██████▎ | 330/520 [21:02<11:32, 3.65s/it] {'loss': 1.2064, 'grad_norm': 0.0010372134727311766, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:02<11:32, 3.65s/it] 64%|██████▎ | 331/520 [21:06<11:28, 3.64s/it] {'loss': 1.1654, 'grad_norm': 0.0010676839054454682, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:06<11:28, 3.64s/it] 64%|██████▍ | 332/520 [21:09<11:25, 3.64s/it] {'loss': 1.2444, 'grad_norm': 0.0010250931088392982, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:09<11:25, 3.64s/it] 64%|██████▍ | 333/520 [21:13<11:19, 3.64s/it] {'loss': 1.3039, 'grad_norm': 0.001153824845136162, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:13<11:19, 3.64s/it] 64%|██████▍ | 334/520 [21:17<11:17, 3.64s/it] {'loss': 1.216, 'grad_norm': 0.001174529151209418, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:17<11:17, 3.64s/it] 64%|██████▍ | 335/520 [21:20<11:12, 3.63s/it] {'loss': 1.2157, 'grad_norm': 0.001018844542649097, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:20<11:12, 3.63s/it] 65%|██████▍ | 336/520 [21:24<11:09, 3.64s/it] {'loss': 1.1155, 'grad_norm': 0.0012297749704609391, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:24<11:09, 3.64s/it] 65%|██████▍ | 337/520 [21:28<11:06, 3.64s/it] {'loss': 1.1029, 'grad_norm': 0.0010911304509666098, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:28<11:06, 3.64s/it] 65%|██████▌ | 338/520 [21:31<11:03, 3.65s/it] {'loss': 1.214, 'grad_norm': 0.0010768478576004819, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:31<11:03, 3.65s/it] 65%|██████▌ | 339/520 [21:35<10:59, 3.65s/it] {'loss': 1.1629, 'grad_norm': 0.001073856532192355, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:35<10:59, 3.65s/it] 65%|██████▌ | 340/520 [21:39<10:56, 3.65s/it] {'loss': 1.1547, 'grad_norm': 0.0011019303419487474, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:39<10:56, 3.65s/it] 66%|██████▌ | 341/520 [21:42<10:52, 3.64s/it] {'loss': 1.1778, 'grad_norm': 0.0011519427394278095, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:42<10:52, 3.64s/it] 66%|██████▌ | 342/520 [21:46<10:48, 3.64s/it] {'loss': 1.2145, 'grad_norm': 0.0014478572917813685, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:46<10:48, 3.64s/it] 66%|██████▌ | 343/520 [21:50<10:48, 3.66s/it] {'loss': 1.1672, 'grad_norm': 0.0010682539797410913, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:50<10:48, 3.66s/it] 66%|██████▌ | 344/520 [21:53<10:43, 3.66s/it] {'loss': 1.1313, 'grad_norm': 0.0010486483033716103, 'learning_rate': 
0.0543789342646837, 'epoch': 0.66}
+ 66%|██████▌ | 344/520 [21:53<10:43, 3.66s/it] 66%|██████▋ | 345/520 [21:57<10:40, 3.66s/it] {'loss': 1.239, 'grad_norm': 0.001145185144408755, 'learning_rate': 0.05382513867649663, 'epoch': 0.66}
+ 66%|██████▋ | 345/520 [21:57<10:40, 3.66s/it] 67%|██████▋ | 346/520 [22:01<10:39, 3.68s/it] {'loss': 1.1875, 'grad_norm': 0.0010924334195403875, 'learning_rate': 0.0532731371726938, 'epoch': 0.67}
+ 67%|██████▋ | 346/520 [22:01<10:39, 3.68s/it] 67%|██████▋ | 347/520 [22:04<10:36, 3.68s/it] {'loss': 1.1454, 'grad_norm': 0.0009946341895067845, 'learning_rate': 0.05272295120081732, 'epoch': 0.67}
+ 67%|██████▋ | 347/520 [22:04<10:36, 3.68s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 67%|██████▋ | 348/520 [22:08<10:30, 3.66s/it] {'loss': 1.1046, 'grad_norm': 0.0013331497636577868, 'learning_rate': 0.05217460213786821, 'epoch': 0.67}
+ 67%|██████▋ | 348/520 [22:08<10:30, 3.66s/it] 67%|██████▋ | 349/520 [22:11<10:23, 3.65s/it] {'loss': 1.1434, 'grad_norm': 0.0010749047222558515, 'learning_rate': 0.051628111289476024, 'epoch': 0.67}
+ 67%|██████▋ | 349/520 [22:11<10:23, 3.65s/it] 67%|██████▋ | 350/520 [22:15<10:19, 3.64s/it] {'loss': 1.1865, 'grad_norm': 0.001144083344836991, 'learning_rate': 0.051083499889071106, 'epoch': 0.67}
+ 67%|██████▋ | 350/520 [22:15<10:19, 3.64s/it] 68%|██████▊ | 351/520 [22:19<10:18, 3.66s/it] {'loss': 1.097, 'grad_norm': 0.0010026762191882176, 'learning_rate': 0.05054078909705926, 'epoch': 0.68}
+ 68%|██████▊ | 351/520 [22:19<10:18, 3.66s/it] 68%|██████▊ | 352/520 [22:23<10:16, 3.67s/it] {'loss': 1.2167, 'grad_norm': 0.0010551796116735784, 'learning_rate': 0.050000000000000024, 'epoch': 0.68}
+ 68%|██████▊ | 352/520 [22:23<10:16, 3.67s/it] 68%|██████▊ | 353/520 [22:26<10:14, 3.68s/it] {'loss': 1.1435, 'grad_norm': 0.00090911357645524, 'learning_rate': 0.04946115360978696, 'epoch': 0.68}
+ 68%|██████▊ | 353/520 [22:26<10:14, 3.68s/it] 68%|██████▊ | 354/520 [22:30<10:09, 3.67s/it] {'loss': 1.2538, 'grad_norm': 0.0010199063381300495, 'learning_rate': 0.048924270862831465, 'epoch': 0.68}
+ 68%|██████▊ | 354/520 [22:30<10:09, 3.67s/it] 68%|██████▊ | 355/520 [22:33<10:03, 3.66s/it] {'loss': 1.156, 'grad_norm': 0.0010574556589009539, 'learning_rate': 0.04838937261924933, 'epoch': 0.68}
+ 68%|██████▊ | 355/520 [22:33<10:03, 3.66s/it] 68%|██████▊ | 356/520 [22:37<10:00, 3.66s/it] {'loss': 1.1597, 'grad_norm': 0.0011154926893712043, 'learning_rate': 0.0478564796620502, 'epoch': 0.68}
+ 68%|██████▊ | 356/520 [22:37<10:00, 3.66s/it] 69%|██████▊ | 357/520 [22:41<09:54, 3.65s/it] {'loss': 1.1869, 'grad_norm': 0.0010734859199835237, 'learning_rate': 0.04732561269632993, 'epoch': 0.69}
+ 69%|██████▊ | 357/520 [22:41<09:54, 3.65s/it] 69%|██████▉ | 358/520 [22:44<09:53, 3.66s/it] {'loss': 1.1264, 'grad_norm': 0.001042898677604715, 'learning_rate': 0.04679679234846636, 'epoch': 0.69}
+ 69%|██████▉ | 358/520 [22:44<09:53, 3.66s/it] 69%|██████▉ | 359/520 [22:48<09:50, 3.67s/it] {'loss': 1.1886, 'grad_norm': 0.0011446996460115501, 'learning_rate': 0.046270039165317606, 'epoch': 0.69}
+ 69%|██████▉ | 359/520 [22:48<09:50, 3.67s/it] 69%|██████▉ | 360/520 [22:52<09:49, 3.68s/it] {'loss': 1.1964, 'grad_norm': 0.0011595598350949124, 'learning_rate': 0.04574537361342407, 'epoch': 0.69}
+ 69%|██████▉ | 360/520 [22:52<09:49, 3.68s/it] 69%|██████▉ | 361/520 [22:56<09:44, 3.68s/it] {'loss': 1.2083, 'grad_norm':
0.0009985410387584224, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:56<09:44, 3.68s/it] 70%|██████▉ | 362/520 [22:59<09:41, 3.68s/it] {'loss': 1.1741, 'grad_norm': 0.0011303880898204106, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:59<09:41, 3.68s/it] 70%|██████▉ | 363/520 [23:03<09:37, 3.68s/it] {'loss': 1.1959, 'grad_norm': 0.001090903943826172, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:03<09:37, 3.68s/it] 70%|███████ | 364/520 [23:07<09:35, 3.69s/it] {'loss': 1.2283, 'grad_norm': 0.0010808601410841855, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [23:07<09:35, 3.69s/it] 70%|███████ | 365/520 [23:10<09:31, 3.68s/it] {'loss': 1.2534, 'grad_norm': 0.0011434647284464387, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:10<09:31, 3.68s/it] 70%|███████ | 366/520 [23:14<09:27, 3.68s/it] {'loss': 1.2106, 'grad_norm': 0.0011314580171184498, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:14<09:27, 3.68s/it] 71%|███████ | 367/520 [23:18<09:25, 3.70s/it] {'loss': 1.2122, 'grad_norm': 0.0010882701897245118, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:18<09:25, 3.70s/it] 71%|███████ | 368/520 [23:21<09:21, 3.70s/it] {'loss': 1.0649, 'grad_norm': 0.0012313017799636956, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:21<09:21, 3.70s/it] 71%|███████ | 369/520 [23:25<09:17, 3.69s/it] {'loss': 1.1832, 'grad_norm': 0.0009636322356625205, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:25<09:17, 3.69s/it] 71%|███████ | 370/520 [23:29<09:12, 3.69s/it] {'loss': 1.1273, 'grad_norm': 0.001070805064234959, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:29<09:12, 3.69s/it] 71%|███████▏ | 371/520 [23:32<09:07, 3.67s/it] {'loss': 1.1275, 'grad_norm': 0.0010957947623248798, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:32<09:07, 3.67s/it] 72%|███████▏ | 372/520 [23:36<09:04, 3.68s/it] {'loss': 1.2559, 'grad_norm': 0.0010227536521351314, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:36<09:04, 3.68s/it] 72%|███████▏ | 373/520 [23:40<08:59, 3.67s/it] {'loss': 1.1451, 'grad_norm': 0.0011665800146545189, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:40<08:59, 3.67s/it] 72%|███████▏ | 374/520 [23:43<08:54, 3.66s/it] {'loss': 1.2124, 'grad_norm': 0.0010947945668465478, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:43<08:54, 3.66s/it] 72%|███████▏ | 375/520 [23:47<08:52, 3.67s/it] {'loss': 1.1314, 'grad_norm': 0.001075260630815447, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:47<08:52, 3.67s/it] 72%|███████▏ | 376/520 [23:51<08:48, 3.67s/it] {'loss': 1.2376, 'grad_norm': 0.001013395228639211, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:51<08:48, 3.67s/it] 72%|███████▎ | 377/520 [23:54<08:44, 3.67s/it] {'loss': 1.1743, 'grad_norm': 0.0011018192623457987, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:54<08:44, 3.67s/it] 73%|███████▎ | 378/520 [23:58<08:40, 3.67s/it] {'loss': 1.2334, 'grad_norm': 0.0010477200598473263, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:58<08:40, 3.67s/it] 73%|███████▎ | 
379/520 [24:02<08:35, 3.65s/it] {'loss': 1.2115, 'grad_norm': 0.0010280445674210473, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:02<08:35, 3.65s/it] 73%|███████▎ | 380/520 [24:05<08:32, 3.66s/it] {'loss': 1.2315, 'grad_norm': 0.0010753766181994364, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:05<08:32, 3.66s/it] 73%|███████▎ | 381/520 [24:09<08:30, 3.67s/it] {'loss': 1.2123, 'grad_norm': 0.0010604111108785466, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:09<08:30, 3.67s/it] 73%|███████▎ | 382/520 [24:13<08:27, 3.67s/it] {'loss': 1.1941, 'grad_norm': 0.0010386280809984276, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:13<08:27, 3.67s/it] 74%|███████▎ | 383/520 [24:16<08:23, 3.68s/it] {'loss': 1.0476, 'grad_norm': 0.0011435366316646788, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:16<08:23, 3.68s/it] 74%|███████▍ | 384/520 [24:20<08:20, 3.68s/it] {'loss': 1.239, 'grad_norm': 0.0010420647516350347, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:20<08:20, 3.68s/it] 74%|███████▍ | 385/520 [24:24<08:14, 3.67s/it] {'loss': 1.1879, 'grad_norm': 0.0009829008209799482, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:24<08:14, 3.67s/it] 74%|███████▍ | 386/520 [24:27<08:10, 3.66s/it] {'loss': 1.146, 'grad_norm': 0.0009427353526205725, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:27<08:10, 3.66s/it] 74%|███████▍ | 387/520 [24:31<08:06, 3.66s/it] {'loss': 1.2588, 'grad_norm': 0.0011031288983346314, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:31<08:06, 3.66s/it] 75%|███████▍ | 388/520 [24:35<08:04, 3.67s/it] {'loss': 1.0968, 'grad_norm': 0.0010312314879255113, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:35<08:04, 3.67s/it] 75%|███████▍ | 389/520 [24:38<08:00, 3.67s/it] {'loss': 1.1457, 'grad_norm': 0.0012082356226941655, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:38<08:00, 3.67s/it] 75%|███████▌ | 390/520 [24:42<07:55, 3.66s/it] {'loss': 1.2076, 'grad_norm': 0.001010821636494913, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:42<07:55, 3.66s/it] 75%|███████▌ | 391/520 [24:46<07:53, 3.67s/it] {'loss': 1.2833, 'grad_norm': 0.0011514407618421767, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:46<07:53, 3.67s/it] 75%|███████▌ | 392/520 [24:49<07:49, 3.67s/it] {'loss': 1.1028, 'grad_norm': 0.001138650037320143, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:49<07:49, 3.67s/it] 76%|███████▌ | 393/520 [24:53<07:44, 3.66s/it] {'loss': 1.1006, 'grad_norm': 0.000902118328020537, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:53<07:44, 3.66s/it] 76%|███████▌ | 394/520 [24:57<07:40, 3.66s/it] {'loss': 1.1662, 'grad_norm': 0.0011206204326422238, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:57<07:40, 3.66s/it] 76%|███████▌ | 395/520 [25:00<07:36, 3.65s/it] {'loss': 1.133, 'grad_norm': 0.0011372532535561361, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:00<07:36, 3.65s/it] 76%|███████▌ | 396/520 [25:04<07:32, 3.65s/it] {'loss': 1.2127, 'grad_norm': 0.0011435131038689988, 'learning_rate': 
0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:04<07:32, 3.65s/it] 76%|███████▋ | 397/520 [25:08<07:30, 3.66s/it] {'loss': 1.1896, 'grad_norm': 0.0010122287013199555, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:08<07:30, 3.66s/it] 77%|███████▋ | 398/520 [25:11<07:27, 3.67s/it] {'loss': 1.1926, 'grad_norm': 0.0011226553214679677, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:11<07:27, 3.67s/it] 77%|███████▋ | 399/520 [25:15<07:25, 3.68s/it] {'loss': 1.1421, 'grad_norm': 0.0010371946582914857, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:15<07:25, 3.68s/it] 77%|███████▋ | 400/520 [25:19<07:23, 3.69s/it] {'loss': 1.172, 'grad_norm': 0.0009506307616699158, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:19<07:23, 3.69s/it] 77%|███████▋ | 401/520 [25:23<07:24, 3.74s/it] {'loss': 1.0235, 'grad_norm': 0.0011715489236816348, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:23<07:24, 3.74s/it] 77%|███████▋ | 402/520 [25:26<07:24, 3.77s/it] {'loss': 1.1481, 'grad_norm': 0.0010712497855914725, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:26<07:24, 3.77s/it] 78%|███████▊ | 403/520 [25:30<07:24, 3.80s/it] {'loss': 1.1739, 'grad_norm': 0.001175815756638974, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:30<07:24, 3.80s/it] 78%|███████▊ | 404/520 [25:34<07:23, 3.82s/it] {'loss': 1.0788, 'grad_norm': 0.0012367066162928665, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:34<07:23, 3.82s/it] 78%|███████▊ | 405/520 [25:38<07:21, 3.84s/it] {'loss': 1.1543, 'grad_norm': 0.0010650169170582844, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:38<07:21, 3.84s/it] 78%|███████▊ | 406/520 [25:42<07:18, 3.85s/it] {'loss': 1.0713, 'grad_norm': 0.001280619870115349, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:42<07:18, 3.85s/it] 78%|███████▊ | 407/520 [25:46<07:15, 3.85s/it] {'loss': 1.2519, 'grad_norm': 0.001108027624762832, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:46<07:15, 3.85s/it] 78%|███████▊ | 408/520 [25:50<07:09, 3.84s/it] {'loss': 1.1634, 'grad_norm': 0.0012140888796317443, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:50<07:09, 3.84s/it] 79%|███████▊ | 409/520 [25:53<07:05, 3.83s/it] {'loss': 1.2779, 'grad_norm': 0.0011692745593337795, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:53<07:05, 3.83s/it] 79%|███████▉ | 410/520 [25:57<07:02, 3.84s/it] {'loss': 1.0173, 'grad_norm': 0.0011042620451749568, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:57<07:02, 3.84s/it] 79%|███████▉ | 411/520 [26:01<06:59, 3.85s/it] {'loss': 1.2619, 'grad_norm': 0.0012419642439028605, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:01<06:59, 3.85s/it] 79%|███████▉ | 412/520 [26:05<06:56, 3.85s/it] {'loss': 1.1683, 'grad_norm': 0.0011400590404331745, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:05<06:56, 3.85s/it] 79%|███████▉ | 413/520 [26:09<06:52, 3.86s/it] {'loss': 1.1639, 'grad_norm': 0.0010444006097661109, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:09<06:52, 3.86s/it] 80%|███████▉ | 414/520 
[26:13<06:48, 3.85s/it] {'loss': 0.9762, 'grad_norm': 0.0008827200272686465, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:13<06:48, 3.85s/it] 80%|███████▉ | 415/520 [26:17<06:43, 3.84s/it] {'loss': 1.1486, 'grad_norm': 0.001023230057480766, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:17<06:43, 3.84s/it] 80%|████████ | 416/520 [26:20<06:39, 3.84s/it] {'loss': 1.0654, 'grad_norm': 0.001150391930766128, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:20<06:39, 3.84s/it] 80%|████████ | 417/520 [26:24<06:34, 3.83s/it] {'loss': 1.2231, 'grad_norm': 0.0011728612986522853, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:24<06:34, 3.83s/it] 80%|████████ | 418/520 [26:28<06:31, 3.84s/it] {'loss': 1.2137, 'grad_norm': 0.0010431523400175333, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:28<06:31, 3.84s/it] 81%|████████ | 419/520 [26:32<06:26, 3.82s/it] {'loss': 1.2052, 'grad_norm': 0.0011849828330859683, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:32<06:26, 3.82s/it] 81%|████████ | 420/520 [26:36<06:22, 3.82s/it] {'loss': 1.0946, 'grad_norm': 0.0011556805808022306, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:36<06:22, 3.82s/it] 81%|████████ | 421/520 [26:39<06:17, 3.82s/it] {'loss': 1.0349, 'grad_norm': 0.0012986817879713723, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:39<06:17, 3.82s/it] 81%|████████ | 422/520 [26:43<06:14, 3.82s/it] {'loss': 1.1487, 'grad_norm': 0.0011438018107406687, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:43<06:14, 3.82s/it] 81%|████████▏ | 423/520 [26:47<06:11, 3.83s/it] {'loss': 1.1308, 'grad_norm': 0.0012093302312857982, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:47<06:11, 3.83s/it] 82%|████████▏ | 424/520 [26:51<06:08, 3.83s/it] {'loss': 1.249, 'grad_norm': 0.001067422574423638, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:51<06:08, 3.83s/it] 82%|████████▏ | 425/520 [26:55<06:03, 3.83s/it] {'loss': 1.1454, 'grad_norm': 0.0010491756279181494, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:55<06:03, 3.83s/it] 82%|████████▏ | 426/520 [26:59<05:59, 3.83s/it] {'loss': 1.1629, 'grad_norm': 0.0013161502613921574, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:59<05:59, 3.83s/it] 82%|████████▏ | 427/520 [27:02<05:55, 3.82s/it] {'loss': 1.0804, 'grad_norm': 0.0010269115425883932, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:02<05:55, 3.82s/it] 82%|████████▏ | 428/520 [27:06<05:49, 3.79s/it] {'loss': 1.063, 'grad_norm': 0.0011179999550349098, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:06<05:49, 3.79s/it] 82%|████████▎ | 429/520 [27:10<05:41, 3.75s/it] {'loss': 1.1549, 'grad_norm': 0.0010898341800038439, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:10<05:41, 3.75s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:13<05:35, 3.73s/it] {'loss': 1.1574, 'grad_norm': 0.0009986479974760565, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:13<05:35, 3.73s/it] 83%|████████▎ | 431/520 [27:17<05:31, 3.73s/it] {'loss': 1.1355, 'grad_norm': 0.001168805509384538, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:17<05:31, 3.73s/it] 83%|████████▎ | 432/520 [27:21<05:25, 3.70s/it] {'loss': 1.0679, 'grad_norm': 0.0010874778682399863, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:21<05:25, 3.70s/it] 83%|████████▎ | 433/520 [27:25<05:20, 3.68s/it] {'loss': 1.196, 'grad_norm': 0.0010509471456686026, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:25<05:20, 3.68s/it] 83%|████████▎ | 434/520 [27:28<05:16, 3.68s/it] {'loss': 0.9486, 'grad_norm': 0.0010819525432658056, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:28<05:16, 3.68s/it] 84%|████████▎ | 435/520 [27:32<05:11, 3.66s/it] {'loss': 1.232, 'grad_norm': 0.0011823378129902538, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:32<05:11, 3.66s/it] 84%|████████▍ | 436/520 [27:36<05:08, 3.68s/it] {'loss': 1.0384, 'grad_norm': 0.0010863905994538386, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:36<05:08, 3.68s/it] 84%|████████▍ | 437/520 [27:39<05:04, 3.67s/it] {'loss': 1.2556, 'grad_norm': 0.001105594292620042, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:39<05:04, 3.67s/it] 84%|████████▍ | 438/520 [27:43<05:00, 3.67s/it] {'loss': 1.0761, 'grad_norm': 0.0010490165218665836, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:43<05:00, 3.67s/it] 84%|████████▍ | 439/520 [27:47<04:57, 3.67s/it] {'loss': 1.1208, 'grad_norm': 0.000915390976266829, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:47<04:57, 3.67s/it] 85%|████████▍ | 440/520 [27:50<04:54, 3.69s/it] {'loss': 1.1091, 'grad_norm': 0.001109250838649135, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:50<04:54, 3.69s/it] 85%|████████▍ | 441/520 [27:54<04:53, 3.71s/it] {'loss': 1.1322, 'grad_norm': 0.001051310404574555, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:54<04:53, 3.71s/it] 85%|████████▌ | 442/520 [27:58<04:52, 3.75s/it] {'loss': 1.1729, 'grad_norm': 0.0011668404805360098, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:58<04:52, 3.75s/it] 85%|████████▌ | 443/520 [28:02<04:50, 3.77s/it] {'loss': 1.1877, 'grad_norm': 0.0010534595279415475, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:02<04:50, 3.77s/it] 85%|████████▌ | 444/520 [28:06<04:48, 3.80s/it] {'loss': 1.1514, 'grad_norm': 0.000992144316980467, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:06<04:48, 3.80s/it] 86%|████████▌ | 445/520 [28:09<04:45, 3.81s/it] {'loss': 1.0814, 'grad_norm': 0.0010441345127576707, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:09<04:45, 3.81s/it] 86%|████████▌ | 446/520 [28:13<04:42, 3.82s/it] {'loss': 1.2093, 'grad_norm': 0.0009951294230142206, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:13<04:42, 3.82s/it] 
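The two "Token indices sequence length is longer than the specified maximum sequence length" warnings above (2778 > 2048 and 2076 > 2048) are emitted by the tokenizer at encode time whenever a conversation exceeds model_max_length = 2048; they are benign as long as the sequence is truncated to the model limit before the forward pass. A standalone sketch of the mechanism (illustrative only, not the TinyLLaVA preprocessing code; the long_text input is hypothetical):

    from transformers import AutoTokenizer

    # Mirrors the run's settings: --tokenizer_use_fast False, --model_max_length 2048.
    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False,
                                        model_max_length=2048)

    long_text = "word " * 4000              # hypothetical input, > 2048 tokens
    ids = tok(long_text)["input_ids"]       # len(ids) > 2048 -> emits the warning

    # Truncating at encode time keeps the sequence within the model limit:
    ids_ok = tok(long_text, truncation=True, max_length=2048)["input_ids"]
    assert len(ids_ok) <= 2048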
86%|████████▌ | 447/520 [28:17<04:40, 3.84s/it] {'loss': 1.1563, 'grad_norm': 0.0010687788102967733, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:17<04:40, 3.84s/it] 86%|████████▌ | 448/520 [28:21<04:35, 3.83s/it] {'loss': 1.1491, 'grad_norm': 0.001107066985541497, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:21<04:35, 3.83s/it] 86%|████████▋ | 449/520 [28:25<04:32, 3.84s/it] {'loss': 1.167, 'grad_norm': 0.0010917502225703684, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:25<04:32, 3.84s/it] 87%|████████▋ | 450/520 [28:29<04:28, 3.84s/it] {'loss': 1.1785, 'grad_norm': 0.0010681465222020506, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:29<04:28, 3.84s/it] 87%|████████▋ | 451/520 [28:32<04:25, 3.84s/it] {'loss': 1.1778, 'grad_norm': 0.0010864007099267423, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:32<04:25, 3.84s/it] 87%|████████▋ | 452/520 [28:36<04:21, 3.84s/it] {'loss': 1.2091, 'grad_norm': 0.0009890391532061218, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:36<04:21, 3.84s/it] 87%|████████▋ | 453/520 [28:40<04:17, 3.84s/it] {'loss': 1.1868, 'grad_norm': 0.0010801180121691653, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:40<04:17, 3.84s/it] 87%|████████▋ | 454/520 [28:44<04:13, 3.84s/it] {'loss': 1.0879, 'grad_norm': 0.0011422997843976248, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:44<04:13, 3.84s/it] 88%|████████▊ | 455/520 [28:48<04:09, 3.84s/it] {'loss': 1.2261, 'grad_norm': 0.0010614181120159139, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:48<04:09, 3.84s/it] 88%|████████▊ | 456/520 [28:52<04:05, 3.83s/it] {'loss': 1.153, 'grad_norm': 0.0010781015070930032, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:52<04:05, 3.83s/it] 88%|████████▊ | 457/520 [28:55<04:00, 3.81s/it] {'loss': 1.0936, 'grad_norm': 0.0009401690901014506, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:55<04:00, 3.81s/it] 88%|████████▊ | 458/520 [28:59<03:56, 3.81s/it] {'loss': 1.2795, 'grad_norm': 0.0011704663458266668, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:59<03:56, 3.81s/it] 88%|████████▊ | 459/520 [29:03<03:50, 3.78s/it] {'loss': 1.2126, 'grad_norm': 0.0011317592044734245, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:03<03:50, 3.78s/it] 88%|████████▊ | 460/520 [29:07<03:44, 3.75s/it] {'loss': 1.0998, 'grad_norm': 0.0010421987023139384, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:07<03:44, 3.75s/it] 89%|████████▊ | 461/520 [29:10<03:39, 3.72s/it] {'loss': 1.1729, 'grad_norm': 0.0008469391208298305, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:10<03:39, 3.72s/it] 89%|████████▉ | 462/520 [29:14<03:34, 3.70s/it] {'loss': 1.2557, 'grad_norm': 0.0010167704978236794, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:14<03:34, 3.70s/it] 89%|████████▉ | 463/520 [29:18<03:30, 3.69s/it] {'loss': 1.0581, 'grad_norm': 0.001117769699947746, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:18<03:30, 3.69s/it] 89%|████████▉ | 464/520 [29:21<03:27, 3.71s/it] {'loss': 1.1943, 
'grad_norm': 0.0011700098757239266, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:21<03:27, 3.71s/it] 89%|████████▉ | 465/520 [29:25<03:23, 3.71s/it] {'loss': 1.2958, 'grad_norm': 0.0011306018226078414, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:25<03:23, 3.71s/it] 90%|████████▉ | 466/520 [29:29<03:19, 3.69s/it] {'loss': 1.1826, 'grad_norm': 0.0010236304701958824, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:29<03:19, 3.69s/it] 90%|████████▉ | 467/520 [29:32<03:14, 3.68s/it] {'loss': 1.1427, 'grad_norm': 0.0009627354431472058, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:32<03:14, 3.68s/it] 90%|█████████ | 468/520 [29:36<03:10, 3.67s/it] {'loss': 1.1596, 'grad_norm': 0.0011845985216421076, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:36<03:10, 3.67s/it] 90%|█████████ | 469/520 [29:40<03:06, 3.66s/it] {'loss': 1.2202, 'grad_norm': 0.0011988269967583094, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:40<03:06, 3.66s/it] 90%|█████████ | 470/520 [29:43<03:03, 3.68s/it] {'loss': 1.1023, 'grad_norm': 0.0010023354666622044, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:43<03:03, 3.68s/it] 91%|█████████ | 471/520 [29:47<03:01, 3.71s/it] {'loss': 1.1243, 'grad_norm': 0.0011319834368145414, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:47<03:01, 3.71s/it] 91%|█████████ | 472/520 [29:51<02:57, 3.70s/it] {'loss': 1.0928, 'grad_norm': 0.0010844231371464044, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:51<02:57, 3.70s/it] 91%|█████████ | 473/520 [29:54<02:52, 3.68s/it] {'loss': 1.1546, 'grad_norm': 0.0012072618914632895, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:54<02:52, 3.68s/it] 91%|█████████ | 474/520 [29:58<02:48, 3.66s/it] {'loss': 1.1832, 'grad_norm': 0.0010350325908132913, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:58<02:48, 3.66s/it] 91%|█████████▏| 475/520 [30:02<02:45, 3.67s/it] {'loss': 1.1036, 'grad_norm': 0.0010164930084355483, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:02<02:45, 3.67s/it] 92%|█████████▏| 476/520 [30:05<02:41, 3.67s/it] {'loss': 1.1457, 'grad_norm': 0.0010934077847781507, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:05<02:41, 3.67s/it] 92%|█████████▏| 477/520 [30:09<02:37, 3.67s/it] {'loss': 1.1374, 'grad_norm': 0.0012071293786784499, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:09<02:37, 3.67s/it] 92%|█████████▏| 478/520 [30:13<02:34, 3.69s/it] {'loss': 1.0898, 'grad_norm': 0.0010991800699883159, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:13<02:34, 3.69s/it] 92%|█████████▏| 479/520 [30:16<02:31, 3.70s/it] {'loss': 1.1467, 'grad_norm': 0.0011310701479311967, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:16<02:31, 3.70s/it] 92%|█████████▏| 480/520 [30:20<02:28, 3.70s/it] {'loss': 1.1658, 'grad_norm': 0.0009778130704199673, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:20<02:28, 3.70s/it] 92%|█████████▎| 481/520 [30:24<02:25, 3.73s/it] {'loss': 1.156, 'grad_norm': 0.0009802003601563937, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:24<02:25, 3.73s/it] 93%|█████████▎| 482/520 [30:28<02:22, 3.75s/it] {'loss': 1.1755, 'grad_norm': 0.0010084296242913575, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:28<02:22, 3.75s/it] 93%|█████████▎| 483/520 [30:32<02:18, 3.75s/it] {'loss': 1.1534, 'grad_norm': 0.0011665908442454122, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:32<02:18, 3.75s/it] 93%|█████████▎| 484/520 [30:35<02:14, 3.73s/it] {'loss': 1.1599, 'grad_norm': 0.0011235190503906566, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:35<02:14, 3.73s/it] 93%|█████████▎| 485/520 [30:39<02:09, 3.70s/it] {'loss': 1.1164, 'grad_norm': 0.0010465852347786597, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:39<02:09, 3.70s/it] 93%|█████████▎| 486/520 [30:43<02:05, 3.70s/it] {'loss': 1.2347, 'grad_norm': 0.0011370204297859714, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:43<02:05, 3.70s/it] 94%|█████████▎| 487/520 [30:46<02:02, 3.72s/it] {'loss': 1.0894, 'grad_norm': 0.0010925972506973427, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:46<02:02, 3.72s/it] 94%|█████████▍| 488/520 [30:50<01:59, 3.74s/it] {'loss': 1.0372, 'grad_norm': 0.0011020316297921048, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:50<01:59, 3.74s/it] 94%|█████████▍| 489/520 [30:54<01:56, 3.76s/it] {'loss': 1.1784, 'grad_norm': 0.0009036243409435695, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:54<01:56, 3.76s/it] 94%|█████████▍| 490/520 [30:58<01:53, 3.77s/it] {'loss': 1.1539, 'grad_norm': 0.0010782064087835309, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:58<01:53, 3.77s/it] 94%|█████████▍| 491/520 [31:01<01:49, 3.77s/it] {'loss': 1.1232, 'grad_norm': 0.0011119046609156429, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:01<01:49, 3.77s/it] 95%|█████████▍| 492/520 [31:05<01:46, 3.80s/it] {'loss': 1.232, 'grad_norm': 0.0011559449797412253, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:05<01:46, 3.80s/it] 95%|█████████▍| 493/520 [31:09<01:42, 3.79s/it] {'loss': 1.1773, 'grad_norm': 0.0010841877236899137, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:09<01:42, 3.79s/it] 95%|█████████▌| 494/520 [31:13<01:37, 3.75s/it] {'loss': 1.1718, 'grad_norm': 0.0009948916266485945, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:13<01:37, 3.75s/it] 95%|█████████▌| 495/520 [31:17<01:33, 3.75s/it] {'loss': 1.1449, 'grad_norm': 0.0010852655335856114, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:17<01:33, 3.75s/it] 95%|█████████▌| 496/520 [31:20<01:30, 3.77s/it] {'loss': 1.0604, 'grad_norm': 0.0010760131383136275, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:20<01:30, 3.77s/it] 96%|█████████▌| 497/520 [31:24<01:26, 3.78s/it] {'loss': 1.1093, 'grad_norm': 0.0009843761125480398, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:24<01:26, 3.78s/it] 96%|█████████▌| 498/520 [31:28<01:23, 3.78s/it] {'loss': 1.1338, 'grad_norm': 0.0010682947827113296, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:28<01:23, 
3.78s/it] 96%|█████████▌| 499/520 [31:32<01:19, 3.79s/it] {'loss': 1.2465, 'grad_norm': 0.0011109832769550425, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:32<01:19, 3.79s/it] 96%|█████████▌| 500/520 [31:36<01:16, 3.80s/it] {'loss': 1.2548, 'grad_norm': 0.0012809521513470203, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:36<01:16, 3.80s/it] 96%|█████████▋| 501/520 [31:39<01:12, 3.80s/it] {'loss': 1.1502, 'grad_norm': 0.0011357638981700326, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:39<01:12, 3.80s/it] 97%|█████████▋| 502/520 [31:43<01:08, 3.80s/it] {'loss': 1.1738, 'grad_norm': 0.0010099330556714887, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:43<01:08, 3.80s/it] 97%|█████████▋| 503/520 [31:47<01:04, 3.80s/it] {'loss': 1.1408, 'grad_norm': 0.0010950485337127217, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:47<01:04, 3.80s/it] 97%|█████████▋| 504/520 [31:51<01:00, 3.80s/it] {'loss': 1.1672, 'grad_norm': 0.0012548958124813817, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:51<01:00, 3.80s/it] 97%|█████████▋| 505/520 [31:55<00:57, 3.80s/it] {'loss': 1.1994, 'grad_norm': 0.001155754803519758, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:55<00:57, 3.80s/it] 97%|█████████▋| 506/520 [31:58<00:53, 3.80s/it] {'loss': 1.1303, 'grad_norm': 0.001092491876927538, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:58<00:53, 3.80s/it] 98%|█████████▊| 507/520 [32:02<00:49, 3.80s/it] {'loss': 1.2853, 'grad_norm': 0.0009755465690739789, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:02<00:49, 3.80s/it] 98%|█████████▊| 508/520 [32:06<00:45, 3.81s/it] {'loss': 1.2416, 'grad_norm': 0.0011093642350961765, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:06<00:45, 3.81s/it] 98%|█████████▊| 509/520 [32:10<00:41, 3.82s/it] {'loss': 1.2168, 'grad_norm': 0.0010377753690511418, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:10<00:41, 3.82s/it] 98%|█████████▊| 510/520 [32:14<00:38, 3.82s/it] {'loss': 1.1619, 'grad_norm': 0.0010587756614851738, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:14<00:38, 3.82s/it] 98%|█████████▊| 511/520 [32:17<00:34, 3.81s/it] {'loss': 1.1375, 'grad_norm': 0.0010218240242472058, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:17<00:34, 3.81s/it] 98%|█████████▊| 512/520 [32:21<00:30, 3.80s/it] {'loss': 1.0226, 'grad_norm': 0.0011529601538589553, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:21<00:30, 3.80s/it] 99%|█████████▊| 513/520 [32:25<00:26, 3.80s/it] {'loss': 1.218, 'grad_norm': 0.001252728581192658, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:25<00:26, 3.80s/it] 99%|█████████▉| 514/520 [32:29<00:22, 3.81s/it] {'loss': 1.1874, 'grad_norm': 0.0009946498851569666, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:29<00:22, 3.81s/it] 99%|█████████▉| 515/520 [32:33<00:19, 3.80s/it] {'loss': 1.2351, 'grad_norm': 0.001250375529036181, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:33<00:19, 3.80s/it] 99%|█████████▉| 516/520 [32:36<00:15, 
3.81s/it] {'loss': 1.1432, 'grad_norm': 0.0010444964688570834, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:36<00:15, 3.81s/it] 99%|█████████▉| 517/520 [32:40<00:11, 3.79s/it] {'loss': 1.1789, 'grad_norm': 0.0010007348082935789, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:40<00:11, 3.79s/it] 100%|█████████▉| 518/520 [32:44<00:07, 3.78s/it] {'loss': 1.1553, 'grad_norm': 0.0011862298075800781, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:44<00:07, 3.78s/it] 100%|█████████▉| 519/520 [32:48<00:03, 3.79s/it] {'loss': 1.1498, 'grad_norm': 0.0010444750728277928, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:48<00:03, 3.79s/it] 100%|██████████| 520/520 [32:52<00:00, 4.03s/it] {'loss': 1.1455, 'grad_norm': 0.001004452085454233, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:52<00:00, 4.03s/it] {'train_runtime': 1972.9125, 'train_samples_per_second': 33.721, 'train_steps_per_second': 0.264, 'train_loss': 1.2370943021315794, 'epoch': 1.0} + 100%|██████████| 520/520 [32:52<00:00, 4.03s/it] 100%|██████████| 520/520 [32:52<00:00, 3.79s/it] +[2025-10-13 20:21:27,278] [INFO] [launch.py:348:main] Process 1007645 exits successfully. +[2025-10-13 20:21:28,280] [INFO] [launch.py:348:main] Process 1007647 exits successfully. +[2025-10-13 20:21:28,280] [INFO] [launch.py:348:main] Process 1007648 exits successfully. +[2025-10-13 20:21:28,281] [INFO] [launch.py:348:main] Process 1007642 exits successfully. +[2025-10-13 20:21:28,281] [INFO] [launch.py:348:main] Process 1007643 exits successfully. +[2025-10-13 20:21:29,283] [INFO] [launch.py:348:main] Process 1007646 exits successfully. +[2025-10-13 20:21:29,283] [INFO] [launch.py:348:main] Process 1007644 exits successfully. +[2025-10-13 20:21:32,287] [INFO] [launch.py:348:main] Process 1007641 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_1.9_2e-1_connector-5.0_1.9_2e-1_ablation_20251013_194705.log +Timestamp: 2025-10-13 20:21:34 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation_20251013_202134.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation_20251013_202134.log new file mode 100644 index 0000000000000000000000000000000000000000..268091d6465ef4b25c22adb03203abf2b4617971 --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation_20251013_202134.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation_20251013_202134.log +Timestamp: 2025-10-13 20:21:34 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
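For reference, the learning-rate trace of the run that just completed follows a cosine schedule with linear warmup (the relaunch command below shows the recipe: --lr_scheduler_type cosine, --warmup_ratio 0.03, --learning_rate 2e-1). Assuming the same batch settings as that command (8 ranks x per-device batch 4 x gradient accumulation 4 = 128 samples per optimizer step), an epoch of roughly 66,529 samples (train_runtime 1972.9 s x 33.721 samples/s, i.e. train_data_ratio 0.1 of the 665K mix) yields the 520 steps shown in the progress bars, with ceil(0.03 * 520) = 16 warmup steps. A minimal sketch, assuming transformers' get_cosine_schedule_with_warmup semantics, that reproduces the logged values:

    import math

    # Cosine schedule with linear warmup, as in transformers'
    # get_cosine_schedule_with_warmup: warmup = ceil(520 * 0.03) = 16 steps,
    # peak lr 2e-1, decaying to 0 at the final step.
    def cosine_lr(step, max_steps=520, warmup=16, peak=2e-1):
        if step < warmup:
            return peak * step / max(1, warmup)
        progress = (step - warmup) / max(1, max_steps - warmup)
        return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(cosine_lr(352))  # ~0.05        matches the logged lr at step 352
    print(cosine_lr(519))  # ~1.9427e-06  matches the logged lr at step 519
    print(cosine_lr(520))  # 0.0          matches the final logged lr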
+ import pynvml # type: ignore[import] +[2025-10-13 20:21:37,521] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 20:21:40,515] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 20:21:40,517] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 2.1 --temperature_mlp_text 2.1 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 2.1 --temperature_mlp_vision 2.1 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 2.1 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-13 20:21:43,141] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 20:21:44,188] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 20:21:44,188] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 20:21:44,189] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 20:21:44,189] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 20:21:44,189] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 20:21:44,189] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 20:21:44,189] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 20:21:44,191] [INFO] [launch.py:253:main] process 1027373 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,193] [INFO]
[launch.py:253:main] process 1027374 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,196] [INFO] [launch.py:253:main] process 1027375 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,198] [INFO] [launch.py:253:main] process 1027376 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,200] [INFO] [launch.py:253:main] process 1027377 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,202] [INFO] [launch.py:253:main] process 1027378 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,204] [INFO] [launch.py:253:main] process 1027379 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:21:44,207] [INFO] [launch.py:253:main] process 1027380 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.1', '--temperature_mlp_text', '2.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.1', '--temperature_mlp_vision', '2.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 20:21:50,938] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:50,995] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,064] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,064] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,064] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,075] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,082] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,086] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:21:51,372] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,412] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,473] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,474] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,477] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,488] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,493] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:21:51,493] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 20:21:51,504] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.1, 'temperature_mlp': 2.1, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.1, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.1, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.1, + "temperature_mlp": 2.1, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+ywang29-vrdb-test1-worker-0:1027373:1027373 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1027373:1027373 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1027373:1027373 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1027373:1027373 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1027373:1027373 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1027373:1027373 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
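The mask configuration dumped above ("mask_type": "soft", temperature 2.1 for attention, MLP, and connector, init_mean 5.0) describes a temperature-scaled sigmoid over learnable per-weight scores that gates the frozen weights. A minimal sketch of such a soft-masked linear layer follows; the class and parameter names are illustrative assumptions, not this repo's actual implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Linear layer whose weights are gated by a soft, temperature-scaled mask."""
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=5.0, temperature=2.1):
        super().__init__(in_features, out_features, bias)
        # One learnable score per weight; init_mean=5.0 starts every mask
        # entry near sigmoid(5.0 / 2.1) ~= 0.92, i.e. close to "keep".
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)

# Dimensions match the hidden_size=896 reported in the config above.
layer = SoftMaskedLinear(896, 896)
y = layer(torch.randn(2, 896))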
+ywang29-vrdb-test1-worker-0:1027379:1027379 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027379:1027379 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027379:1027379 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027379:1027379 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027379:1027379 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027379:1027379 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1027378:1027378 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027378:1027378 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027378:1027378 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027378:1027378 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027378:1027378 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027378:1027378 [5] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1027376:1027376 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027376:1027376 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027376:1027376 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027376:1027376 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027376:1027376 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027376:1027376 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1027377:1027377 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027377:1027377 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027377:1027377 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027377:1027377 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027377:1027377 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027377:1027377 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1027380:1027380 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027380:1027380 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027380:1027380 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027380:1027380 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027380:1027380 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027380:1027380 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1027375:1027375 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027375:1027375 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027375:1027375 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027375:1027375 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027375:1027375 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027375:1027375 [2] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1027374:1027374 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1027374:1027374 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027374:1027374 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027374:1027374 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1027374:1027374 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1027374:1027374 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO ncclCommInitRank comm 0x55c45247ef60 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO ncclCommInitRank comm 0x558e2b2b2910 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO ncclCommInitRank comm 0x5652d82261e0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO ncclCommInitRank comm 0x56210cbe58e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO ncclCommInitRank comm 0x55c4a466fc80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO ncclCommInitRank comm 0x55d693fd2ed0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO ncclCommInitRank comm 0x55631427d560 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO ncclCommInitRank comm 0x55c8c1078530 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5edbbdc72facca1 - Init START +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO NVLS multicast support is not available on dev 3 
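The ncclCommInitRank records above show all eight ranks joining one communicator (nranks 8, single node, one GPU per process) over the eth0 socket transport, since no InfiniBand device was found. At the framework level this corresponds to an ordinary torch.distributed NCCL process group; a minimal sketch, assuming the RANK/WORLD_SIZE/MASTER_ADDR environment variables that the launcher exports.

import os
import torch
import torch.distributed as dist

def main():
    # The launcher exports LOCAL_RANK so each process claims its own GPU.
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    torch.cuda.set_device(local_rank)
    # This call is what ultimately triggers the ncclCommInitRank lines above.
    dist.init_process_group(backend="nccl")
    t = torch.ones(1, device="cuda")
    dist.all_reduce(t)  # travels over the rings/trees NCCL just built
    assert t.item() == dist.get_world_size()
    dist.destroy_process_group()

if __name__ == "__main__":
    main()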
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO comm 0x55c4a466fc80 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO comm 0x56210cbe58e0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO comm 0x55d693fd2ed0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO comm 0x55c45247ef60 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO comm 0x55c8c1078530 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO comm 0x5652d82261e0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO comm 0x55631427d560 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO comm 0x558e2b2b2910 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] 
NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL 
INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL 
INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027373:1029017 [0] NCCL INFO ncclCommInitRank comm 0x5652d82261e0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027376:1029020 [3] NCCL INFO ncclCommInitRank comm 0x55d693fd2ed0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027379:1029018 [6] NCCL INFO ncclCommInitRank comm 0x55631427d560 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027380:1029022 [7] NCCL INFO ncclCommInitRank comm 0x55c8c1078530 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027377:1029021 [4] NCCL INFO ncclCommInitRank comm 0x55c4a466fc80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1027375:1029056 [2] NCCL INFO ncclCommInitRank comm 0x56210cbe58e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027378:1029019 [5] NCCL INFO ncclCommInitRank comm 0x558e2b2b2910 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5edbbdc72facca1 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027374:1029057 [1] NCCL INFO ncclCommInitRank comm 0x55c45247ef60 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5edbbdc72facca1 - Init COMPLETE
+[2025-10-13 20:22:33,150] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 
'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 20:22:34,979] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
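The "newly initialized" warnings above are expected for mask-tuning: the pretrain checkpoint stores only the ordinary weights, so every masked Linear's `scores` tensor (one entry per weight, same shape as the weight) is created fresh when the model is built. As a rough illustration of what such a layer could look like (a minimal sketch assuming a temperature-scaled sigmoid gate, in line with "mask_type": "soft" and temperature 1.3 from the config; this is not the repository's actual implementation):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Illustrative stand-in for SupermaskLinearSparsity_SoftForward_Normal."""
    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One learnable score per weight entry; this is the tensor the
        # checkpoint loader reports as "newly initialized".
        self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
        self.weight.requires_grad_(False)  # mask-tuning leaves weights frozen

    def forward(self, x):
        # Soft forward: sigmoid(5.0 / 1.3) is roughly 0.98, so the mask
        # starts near-transparent and training pushes entries toward 0 or 1.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Under this reading, the "Pre-training init ... Mean=5.000000" lines further down simply confirm that each score tensor starts at the constant 5.0.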
+TinyLlavaForConditionalGeneration(
+ (language_model): Qwen2ForCausalLM(
+ (model): Qwen2Model(
+ (embed_tokens): Embedding(151936, 896)
+ (layers): ModuleList(
+ (0-23): 24 x Qwen2DecoderLayer(
+ (self_attn): Qwen2FlashAttention2(
+ (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+ (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+ (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+ (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+ (rotary_emb): Qwen2RotaryEmbedding()
+ )
+ (mlp): Qwen2MLP(
+ (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+ (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+ (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+ (act_fn): SiLU()
+ )
+ (input_layernorm): Qwen2RMSNorm()
+ (post_attention_layernorm): Qwen2RMSNorm()
+ )
+ )
+ (norm): Qwen2RMSNorm()
+ )
+ (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+ )
+ (vision_tower): SIGLIPVisionTower(
+ (_vision_tower): SiglipVisionModel(
+ (vision_model): SiglipVisionTransformer(
+ (embeddings): SiglipVisionEmbeddings(
+ (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+ (position_embedding): Embedding(729, 1152)
+ )
+ (encoder): SiglipEncoder(
+ (layers): ModuleList(
+ (0-26): 27 x SiglipEncoderLayer(
+ (self_attn): SiglipAttention(
+ (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+ (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+ (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+ (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+ )
+ (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+ (mlp): SiglipMLP(
+ (activation_fn): PytorchGELUTanh()
+ (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+ (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+ )
+ (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+ )
+ )
+ )
+ (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+ (head): SiglipMultiheadAttentionPoolingHead(
+ (attention): MultiheadAttention(
+ (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+ )
+ (layernorm): 
LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: 
Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000
+Pre-training init connector._connector.0.scores: Mean=5.000005
+Pre-training init connector._connector.2.scores: Mean=4.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
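Mask-tuning here runs on a 10% random subset of the 665,298-example training set; 66,529 is exactly int(0.1 * 665298). A minimal sketch of how such a subsample might be drawn (a hypothetical helper, not the project's actual data loader; the seed of 42 mirrors the run name but is an assumption):

import random
from torch.utils.data import Subset

def sample_fraction(dataset, fraction=0.1, seed=42):
    # int(0.1 * 665298) = 66529, matching the message above
    k = int(len(dataset) * fraction)
    indices = random.Random(seed).sample(range(len(dataset)), k)
    return Subset(dataset, indices)

The trainable-parameter total logged just below is consistent with training only the score tensors: 24 x (2 x 802816 + 2 x 114688 + 3 x 4358144) for the language model plus 1032192 + 802816 for the two connector layers gives 359,661,568.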
+2025-10-13 20:22:52,545 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-13 20:22:52,549 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters 
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6]
-1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO 
Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL 
INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL 
INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO 
Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL 
INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL 
INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO Connected all trees 
+ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1027378:1034032 [5] NCCL INFO ncclCommInitRank comm 0x7fbeb006abd0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xa3db82626d7b826a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1027377:1034033 [4] NCCL INFO ncclCommInitRank comm 0x7fdf5806a860 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xa3db82626d7b826a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1027380:1034034 [7] NCCL INFO ncclCommInitRank comm 0x7f4d90069960 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xa3db82626d7b826a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1027376:1034031 [3] NCCL INFO ncclCommInitRank comm 0x7f89b806a9d0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xa3db82626d7b826a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1027374:1034037 [1] NCCL INFO ncclCommInitRank comm 0x7f2c2406a820 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xa3db82626d7b826a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1027379:1034035 [6] NCCL INFO ncclCommInitRank comm 0x7ff51006a7b0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xa3db82626d7b826a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1027375:1034036 [2] NCCL INFO ncclCommInitRank comm 0x7fa14806a3d0 rank 2 nranks 8 cudaDev 2 nvmlDev 
2 busId 201c0 commId 0xa3db82626d7b826a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1027373:1034030 [0] NCCL INFO ncclCommInitRank comm 0x7f3fcc06ad30 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xa3db82626d7b826a - Init COMPLETE
+ 0%| | 1/520 [00:14<2:01:32, 14.05s/it] {'loss': 2.1606, 'grad_norm': 0.022387704481464748, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520 [00:14<2:01:32, 14.05s/it] 0%| | 2/520 [00:17<1:08:33, 7.94s/it] {'loss': 2.1418, 'grad_norm': 0.023895392307813925, 'learning_rate': 0.025, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:08:33, 7.94s/it] 1%| | 3/520 [00:21<51:30, 5.98s/it] {'loss': 2.3007, 'grad_norm': 0.027224840716634866, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 3/520 [00:21<51:30, 5.98s/it] 1%| | 4/520 [00:25<43:33, 5.06s/it] {'loss': 1.6833, 'grad_norm': 0.007553430222578556, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<43:33, 5.06s/it] 1%| | 5/520 [00:28<39:09, 4.56s/it] {'loss': 1.7037, 'grad_norm': 0.00581473844243812, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:09, 4.56s/it] 1%| | 6/520 [00:32<36:29, 4.26s/it] {'loss': 1.4229, 'grad_norm': 0.0033690255562875315, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:29, 4.26s/it] 1%|▏ | 7/520 [00:36<34:54, 4.08s/it] {'loss': 1.4862, 'grad_norm': 0.006203218056482484, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:36<34:54, 4.08s/it] 2%|▏ | 8/520 [00:40<35:21, 4.14s/it] {'loss': 1.5087, 'grad_norm': 0.004716891946525741, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 8/520 [00:40<35:21, 4.14s/it] 2%|▏ | 9/520 [00:44<35:24, 4.16s/it] {'loss': 1.5634, 'grad_norm': 0.0030922696780843746, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<35:24, 4.16s/it] 2%|▏ | 10/520 [00:48<33:59, 4.00s/it] {'loss': 1.3944, 'grad_norm': 0.0029787339365663704, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<33:59, 4.00s/it] 2%|▏ | 11/520 [00:51<33:21, 3.93s/it] {'loss': 1.4724, 'grad_norm': 0.0036168660549862714, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:51<33:21, 3.93s/it] 2%|▏ | 12/520 [00:55<32:33, 3.84s/it] {'loss': 1.3883, 'grad_norm': 0.0032698665091754195, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:55<32:33, 3.84s/it][2025-10-13 20:23:57,278] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [00:59<33:46, 4.00s/it] {'loss': 1.4085, 'grad_norm': 0.0024403477060463905, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [00:59<33:46, 4.00s/it] 3%|▎ | 14/520 [01:03<32:59, 3.91s/it] {'loss': 1.4454, 'grad_norm': 0.0020653397364993244, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:03<32:59, 3.91s/it] 3%|▎ | 15/520 [01:07<32:18, 3.84s/it] {'loss': 1.4295, 'grad_norm': 0.002222456055640722, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:18, 3.84s/it] 3%|▎ | 16/520 [01:10<31:46, 3.78s/it] {'loss': 1.3886, 'grad_norm': 0.0022941063241123084, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:10<31:46, 3.78s/it] 3%|▎ | 17/520 [01:14<31:23, 3.74s/it] {'loss': 1.4722, 'grad_norm': 0.0017385441236019467, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:14<31:23, 3.74s/it] 3%|▎ | 18/520 [01:18<31:06, 3.72s/it] {'loss': 1.3284, 'grad_norm': 0.001803972673713685, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:18<31:06, 3.72s/it] 4%|▎ | 19/520 [01:21<30:54, 3.70s/it] {'loss': 1.3819, 'grad_norm': 0.0018737186826030007, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:21<30:54, 3.70s/it] 4%|▍ | 20/520 [01:25<30:58, 3.72s/it] {'loss': 1.3103, 'grad_norm': 0.00177311682057701, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:25<30:58, 3.72s/it] 4%|▍ | 21/520 [01:29<30:50, 3.71s/it] {'loss': 1.3606, 'grad_norm': 0.0017754497791453612, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:29<30:50, 3.71s/it] 4%|▍ | 22/520 [01:33<30:40, 3.69s/it] {'loss': 1.4709, 'grad_norm': 0.0018093365321780212, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:33<30:40, 3.69s/it] 4%|▍ | 23/520 [01:36<30:32, 3.69s/it] {'loss': 1.4112, 'grad_norm': 0.0016813275396013595, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:36<30:32, 3.69s/it] 5%|▍ | 24/520 [01:40<30:20, 3.67s/it] {'loss': 1.3537, 'grad_norm': 0.0016108706369142363, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 24/520 [01:40<30:20, 3.67s/it] 5%|▍ | 25/520 [01:44<30:11, 3.66s/it] {'loss': 1.4025, 'grad_norm': 0.0017306627707163872, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:44<30:11, 3.66s/it] 5%|▌ | 26/520 [01:47<30:11, 3.67s/it] {'loss': 1.3867, 'grad_norm': 0.0016242894636058155, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:47<30:11, 3.67s/it] 5%|▌ | 27/520 [01:51<30:03, 3.66s/it] {'loss': 1.3041, 'grad_norm': 0.001944781675825911, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:51<30:03, 3.66s/it] 5%|▌ | 28/520 [01:54<29:57, 3.65s/it] {'loss': 1.3163, 'grad_norm': 0.0016602055793358517, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:54<29:57, 3.65s/it] 6%|▌ | 29/520 [01:58<29:53, 3.65s/it] {'loss': 1.3394, 'grad_norm': 0.0016707047344628066, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 29/520 [01:58<29:53, 3.65s/it] 6%|▌ | 30/520 [02:02<29:50, 3.65s/it] {'loss': 1.4217, 'grad_norm': 0.0014036820286934314, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
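[Editor's sketch] The learning-rate column is consistent with linear warmup to the 0.2 peak over the first 16 steps followed by half-cosine decay over the remaining 504 of 520 steps; the warmup and total counts are inferred from the logged values, since the trainer's actual scheduler flags are not shown in this log. A short script that reproduces the logged rates:

import math

def lr(step, peak=0.2, warmup=16, total=520):
    # Linear warmup, then cosine decay to zero.
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr(8))   # 0.1              (logged at step 8)
print(lr(17))  # ~0.199998057...  (logged: 0.1999980572931538)
print(lr(30))  # ~0.199619469...  (logged: 0.19961946980917455)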
+ 6%|▌ | 30/520 [02:02<29:50, 3.65s/it] 6%|▌ | 31/520 [02:05<29:42, 3.64s/it] {'loss': 1.3123, 'grad_norm': 0.0014185224158843675, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:05<29:42, 3.64s/it] 6%|▌ | 32/520 [02:09<29:40, 3.65s/it] {'loss': 1.2756, 'grad_norm': 0.0015370714425352468, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:09<29:40, 3.65s/it] 6%|▋ | 33/520 [02:13<29:38, 3.65s/it] {'loss': 1.3181, 'grad_norm': 0.0015892808018243595, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:13<29:38, 3.65s/it] 7%|▋ | 34/520 [02:16<29:42, 3.67s/it] {'loss': 1.3101, 'grad_norm': 0.0016276785555223458, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 34/520 [02:16<29:42, 3.67s/it] 7%|▋ | 35/520 [02:20<29:38, 3.67s/it] {'loss': 1.3183, 'grad_norm': 0.0017779138644805133, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:20<29:38, 3.67s/it] 7%|▋ | 36/520 [02:24<29:32, 3.66s/it] {'loss': 1.4107, 'grad_norm': 0.0014811623202643392, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:24<29:32, 3.66s/it] 7%|▋ | 37/520 [02:27<29:28, 3.66s/it] {'loss': 1.4022, 'grad_norm': 0.0014155156976504183, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:27<29:28, 3.66s/it] 7%|▋ | 38/520 [02:31<29:26, 3.66s/it] {'loss': 1.4878, 'grad_norm': 0.001570940843645732, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:31<29:26, 3.66s/it] 8%|▊ | 39/520 [02:35<29:32, 3.69s/it] {'loss': 1.3401, 'grad_norm': 0.0017656515344521705, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:35<29:32, 3.69s/it] 8%|▊ | 40/520 [02:39<29:45, 3.72s/it] {'loss': 1.3738, 'grad_norm': 0.0015254879237468321, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 40/520 [02:39<29:45, 3.72s/it] 8%|▊ | 41/520 [02:42<30:01, 3.76s/it] {'loss': 1.3501, 'grad_norm': 0.0016209379664954692, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:42<30:01, 3.76s/it] 8%|▊ | 42/520 [02:46<30:10, 3.79s/it] {'loss': 1.3649, 'grad_norm': 0.0020794797313448696, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 42/520 [02:46<30:10, 3.79s/it] 8%|▊ | 43/520 [02:50<30:16, 3.81s/it] {'loss': 1.3053, 'grad_norm': 0.0013947321469952114, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 43/520 [02:50<30:16, 3.81s/it] 8%|▊ | 44/520 [02:54<30:18, 3.82s/it] {'loss': 1.3996, 'grad_norm': 0.0013972284077710306, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 8%|▊ | 44/520 [02:54<30:18, 3.82s/it] 9%|▊ | 45/520 [02:58<30:17, 3.83s/it] {'loss': 1.3661, 'grad_norm': 0.0015186498060194066, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▊ | 45/520 [02:58<30:17, 3.83s/it] 9%|▉ | 46/520 [03:02<30:12, 3.82s/it] {'loss': 1.4519, 'grad_norm': 0.0014058029778120094, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:02<30:12, 3.82s/it] 9%|▉ | 47/520 [03:05<30:08, 3.82s/it] {'loss': 1.3551, 'grad_norm': 0.0014954146702479438, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:06<30:08, 3.82s/it] 9%|▉ | 48/520 [03:09<30:07, 3.83s/it] {'loss': 1.3331, 'grad_norm': 0.0016602834936245974, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:09<30:07, 3.83s/it] 9%|▉ | 49/520 [03:13<30:00, 3.82s/it] {'loss': 1.3772, 'grad_norm': 0.0015263661538633742, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:13<30:00, 3.82s/it] 10%|▉ | 50/520 [03:17<30:03, 3.84s/it] {'loss': 1.3708, 'grad_norm': 0.0014202256429126758, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 10%|▉ | 50/520 [03:17<30:03, 3.84s/it] 10%|▉ | 51/520 [03:21<29:54, 3.83s/it] {'loss': 1.3008, 'grad_norm': 0.0016163170151847939, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:21<29:54, 3.83s/it] 10%|█ | 52/520 [03:25<29:49, 3.82s/it] {'loss': 1.4266, 'grad_norm': 0.0017801921886781758, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:25<29:49, 3.82s/it] 10%|█ | 53/520 [03:28<29:47, 3.83s/it] {'loss': 1.4133, 'grad_norm': 0.0016941186740851374, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:28<29:47, 3.83s/it] 10%|█ | 54/520 [03:32<29:43, 3.83s/it] {'loss': 1.3352, 'grad_norm': 0.001530202102747004, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:32<29:43, 3.83s/it] 11%|█ | 55/520 [03:36<29:41, 3.83s/it] {'loss': 1.3095, 'grad_norm': 0.0016898433185476426, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+ 11%|█ | 55/520 [03:36<29:41, 3.83s/it] 11%|█ | 56/520 [03:40<29:35, 3.83s/it] {'loss': 1.4336, 'grad_norm': 0.0016322942625120465, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:40<29:35, 3.83s/it] 11%|█ | 57/520 [03:44<29:33, 3.83s/it] {'loss': 1.2989, 'grad_norm': 0.0018681057721593628, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:44<29:33, 3.83s/it] 11%|█ | 58/520 [03:48<29:29, 3.83s/it] {'loss': 1.4615, 'grad_norm': 0.0015692964711034394, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+ 11%|█ | 58/520 [03:48<29:29, 3.83s/it] 11%|█▏ | 59/520 [03:52<29:33, 3.85s/it] {'loss': 1.2715, 'grad_norm': 0.0013788183375319566, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [03:52<29:33, 3.85s/it] 12%|█▏ | 60/520 [03:55<29:20, 3.83s/it] {'loss': 1.3627, 'grad_norm': 0.001410072256421551, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+ 12%|█▏ | 60/520 [03:55<29:20, 3.83s/it] 12%|█▏ | 61/520 [03:59<29:24, 3.84s/it] {'loss': 1.3568, 'grad_norm': 0.0015522937424010262, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [03:59<29:24, 3.84s/it] 12%|█▏ | 62/520 [04:03<29:23, 3.85s/it] {'loss': 1.3459, 'grad_norm': 0.0016134362824707616, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:03<29:23, 3.85s/it] 12%|█▏ | 63/520 [04:07<29:26, 3.86s/it] {'loss': 1.3314, 'grad_norm': 0.0013855928861437516, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:07<29:26, 3.86s/it] 12%|█▏ | 64/520 [04:11<29:23, 3.87s/it] {'loss': 1.3613, 'grad_norm': 0.0015020089054877256, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:11<29:23, 3.87s/it] 12%|█▎ | 65/520 [04:15<29:30, 3.89s/it] {'loss': 1.3696, 'grad_norm': 0.0018619897398201406, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:15<29:30, 3.89s/it] 13%|█▎ | 66/520 [04:19<29:23, 3.89s/it] {'loss': 1.3205, 'grad_norm': 0.0013089277526643912, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+ 13%|█▎ | 66/520 [04:19<29:23, 3.89s/it] 13%|█▎ | 67/520 [04:23<29:22, 3.89s/it] {'loss': 1.2288, 'grad_norm': 0.0014127533359909069, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:23<29:22, 3.89s/it] 13%|█▎ | 68/520 [04:26<29:19, 3.89s/it] {'loss': 1.2917, 'grad_norm': 0.0015490112686422628, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:26<29:19, 3.89s/it] 13%|█▎ | 69/520 [04:30<29:12, 3.89s/it] {'loss': 1.2756, 'grad_norm': 0.001802661800090978, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:30<29:12, 3.89s/it] 13%|█▎ | 70/520 [04:34<29:08, 3.88s/it] {'loss': 1.3062, 'grad_norm': 0.0016049730074653118, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:34<29:08, 3.88s/it] 14%|█▎ | 71/520 [04:38<29:06, 3.89s/it] {'loss': 1.2434, 'grad_norm': 0.0013124398254160305, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+ 14%|█▎ | 71/520 [04:38<29:06, 3.89s/it] 14%|█▍ | 72/520 [04:42<28:59, 3.88s/it] {'loss': 1.3937, 'grad_norm': 0.001648884272530681, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:42<28:59, 3.88s/it] 14%|█▍ | 73/520 [04:46<28:56, 3.88s/it] {'loss': 1.2237, 'grad_norm': 0.0014353724311103608, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:46<28:56, 3.88s/it] 14%|█▍ | 74/520 [04:50<28:55, 3.89s/it] {'loss': 1.3335, 'grad_norm': 0.0015736915575678726, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:50<28:55, 3.89s/it] 14%|█▍ | 75/520 [04:54<28:49, 3.89s/it] {'loss': 1.2435, 'grad_norm': 0.0013577460542598134, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [04:54<28:49, 3.89s/it] 15%|█▍ | 76/520 [04:58<28:53, 3.90s/it] {'loss': 1.4006, 'grad_norm': 0.0013602571554574543, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+ 15%|█▍ | 76/520 [04:58<28:53, 3.90s/it] 15%|█▍ | 77/520 [05:01<28:44, 3.89s/it] {'loss': 1.1625, 'grad_norm': 0.0014535367138589384, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [05:01<28:44, 3.89s/it] 15%|█▌ | 78/520 [05:05<28:36, 3.88s/it] {'loss': 1.283, 'grad_norm': 0.0016055756589030128, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:05<28:36, 3.88s/it] 15%|█▌ | 79/520 [05:09<28:31, 3.88s/it] {'loss': 1.2695, 'grad_norm': 0.0014864146198265982, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:09<28:31, 3.88s/it] 15%|█▌ | 80/520 [05:13<28:29, 3.89s/it] {'loss': 1.3878, 'grad_norm': 0.0014262948743914996, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:13<28:29, 3.89s/it] 16%|█▌ | 81/520 [05:17<28:33, 3.90s/it] {'loss': 1.4101, 'grad_norm': 0.001999731113924546, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+ 16%|█▌ | 81/520 [05:17<28:33, 3.90s/it] 16%|█▌ | 82/520 [05:21<28:29, 3.90s/it] {'loss': 1.3389, 'grad_norm': 0.001367213706865338, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:21<28:29, 3.90s/it] 16%|█▌ | 83/520 [05:25<28:26, 3.91s/it] {'loss': 1.3575, 'grad_norm': 0.0015587392406761493, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:25<28:26, 3.91s/it] 16%|█▌ | 84/520 [05:29<28:19, 3.90s/it] {'loss': 1.3678, 'grad_norm': 0.0014937867850561648, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:29<28:19, 3.90s/it] 16%|█▋ | 85/520 [05:33<28:14, 3.89s/it] {'loss': 1.4041, 'grad_norm': 0.0013735048973884667, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:33<28:14, 3.89s/it] 17%|█▋ | 86/520 [05:36<28:09, 3.89s/it] {'loss': 1.397, 'grad_norm': 0.0014901774344904196, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+ 17%|█▋ | 86/520 [05:36<28:09, 3.89s/it] 17%|█▋ | 87/520 [05:40<27:40, 3.83s/it] {'loss': 1.3345, 'grad_norm': 0.001308185509044311, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:40<27:40, 3.83s/it] 17%|█▋ | 88/520 [05:44<27:23, 3.80s/it] {'loss': 1.2796, 'grad_norm': 0.0012780920095823247, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
+ 17%|█▋
| 88/520 [05:44<27:23, 3.80s/it] 17%|█▋ | 89/520 [05:48<27:02, 3.76s/it] {'loss': 1.3536, 'grad_norm': 0.0014662433951037354, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:48<27:02, 3.76s/it] 17%|█▋ | 90/520 [05:51<26:49, 3.74s/it] {'loss': 1.289, 'grad_norm': 0.0013472679129585517, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:51<26:49, 3.74s/it] 18%|█▊ | 91/520 [05:55<26:38, 3.73s/it] {'loss': 1.3627, 'grad_norm': 0.0013952922584981587, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:55<26:38, 3.73s/it] 18%|█▊ | 92/520 [05:59<26:28, 3.71s/it] {'loss': 1.2921, 'grad_norm': 0.0014482890464908647, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:59<26:28, 3.71s/it] 18%|█▊ | 93/520 [06:02<26:24, 3.71s/it] {'loss': 1.3115, 'grad_norm': 0.001532114242942951, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:02<26:24, 3.71s/it] 18%|█▊ | 94/520 [06:06<26:14, 3.70s/it] {'loss': 1.3925, 'grad_norm': 0.001383831963004678, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:06<26:14, 3.70s/it] 18%|█▊ | 95/520 [06:10<26:06, 3.69s/it] {'loss': 1.2946, 'grad_norm': 0.002385200946456115, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:10<26:06, 3.69s/it] 18%|█▊ | 96/520 [06:13<26:01, 3.68s/it] {'loss': 1.3014, 'grad_norm': 0.0011674226810241871, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:13<26:01, 3.68s/it] 19%|█▊ | 97/520 [06:17<26:01, 3.69s/it] {'loss': 1.2649, 'grad_norm': 0.0018590107975007377, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:17<26:01, 3.69s/it] 19%|█▉ | 98/520 [06:21<25:55, 3.69s/it] {'loss': 1.2734, 'grad_norm': 0.0013021204336785023, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:21<25:55, 3.69s/it] 19%|█▉ | 99/520 [06:24<25:50, 3.68s/it] {'loss': 1.2907, 'grad_norm': 0.0016880413056512297, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:24<25:50, 3.68s/it] 19%|█▉ | 100/520 [06:28<25:50, 3.69s/it] {'loss': 1.2711, 'grad_norm': 0.0013990836337468109, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:28<25:50, 3.69s/it] 19%|█▉ | 101/520 [06:32<25:42, 3.68s/it] {'loss': 1.2906, 'grad_norm': 0.0014076112941397213, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:32<25:42, 3.68s/it] 20%|█▉ | 102/520 [06:35<25:38, 3.68s/it] {'loss': 1.2983, 'grad_norm': 0.0014601836527409339, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:35<25:38, 3.68s/it] 20%|█▉ | 103/520 [06:39<25:31, 3.67s/it] {'loss': 1.2236, 'grad_norm': 0.0012487217421694328, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:39<25:31, 3.67s/it] 20%|██ | 104/520 [06:43<25:30, 3.68s/it] {'loss': 1.2986, 'grad_norm': 0.0013338394798540077, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:43<25:30, 3.68s/it] 20%|██ | 105/520 [06:46<25:27, 3.68s/it] {'loss': 1.2905, 'grad_norm': 0.0012522958104758432, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:46<25:27, 3.68s/it] 20%|██ | 106/520 [06:50<25:23, 3.68s/it] {'loss': 1.2975, 'grad_norm': 0.0013309835745907485, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:50<25:23, 3.68s/it] 21%|██ | 107/520 [06:54<25:21, 3.69s/it] {'loss': 1.2742, 'grad_norm': 0.001358276006660908, 'learning_rate': 0.18433914458128858, 
'epoch': 0.21} + 21%|██ | 107/520 [06:54<25:21, 3.69s/it] 21%|██ | 108/520 [06:58<25:48, 3.76s/it] {'loss': 1.2576, 'grad_norm': 0.0014570679797342445, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:58<25:48, 3.76s/it] 21%|██ | 109/520 [07:02<26:03, 3.80s/it] {'loss': 1.2515, 'grad_norm': 0.0012138066358738046, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:02<26:03, 3.80s/it] 21%|██ | 110/520 [07:06<26:11, 3.83s/it] {'loss': 1.4355, 'grad_norm': 0.0015611900790383306, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:06<26:11, 3.83s/it] 21%|██▏ | 111/520 [07:10<26:16, 3.85s/it] {'loss': 1.4358, 'grad_norm': 0.0014657691599536773, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:10<26:16, 3.85s/it] 22%|██▏ | 112/520 [07:13<26:20, 3.87s/it] {'loss': 1.3167, 'grad_norm': 0.001279901831277902, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:13<26:20, 3.87s/it] 22%|██▏ | 113/520 [07:17<26:23, 3.89s/it] {'loss': 1.2104, 'grad_norm': 0.0013196800384769182, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:17<26:23, 3.89s/it] 22%|██▏ | 114/520 [07:21<26:21, 3.90s/it] {'loss': 1.2997, 'grad_norm': 0.0012550966294218163, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:21<26:21, 3.90s/it] 22%|██▏ | 115/520 [07:25<26:24, 3.91s/it] {'loss': 1.4048, 'grad_norm': 0.0012990006875397005, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:25<26:24, 3.91s/it] 22%|██▏ | 116/520 [07:29<26:20, 3.91s/it] {'loss': 1.4161, 'grad_norm': 0.0013793751193469737, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:29<26:20, 3.91s/it] 22%|██▎ | 117/520 [07:33<26:15, 3.91s/it] {'loss': 1.3832, 'grad_norm': 0.001416412707053641, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:33<26:15, 3.91s/it] 23%|██▎ | 118/520 [07:37<26:12, 3.91s/it] {'loss': 1.2923, 'grad_norm': 0.0012437904151922782, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:37<26:12, 3.91s/it] 23%|██▎ | 119/520 [07:41<26:08, 3.91s/it] {'loss': 1.2461, 'grad_norm': 0.0014392530864349097, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:41<26:08, 3.91s/it] 23%|██▎ | 120/520 [07:45<26:00, 3.90s/it] {'loss': 1.2574, 'grad_norm': 0.0017081903923484877, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:45<26:00, 3.90s/it] 23%|██▎ | 121/520 [07:49<25:58, 3.90s/it] {'loss': 1.3162, 'grad_norm': 0.0015027410158662815, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:49<25:58, 3.90s/it] 23%|██▎ | 122/520 [07:53<25:57, 3.91s/it] {'loss': 1.2236, 'grad_norm': 0.0013646480536687233, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:53<25:57, 3.91s/it] 24%|██▎ | 123/520 [07:57<25:54, 3.92s/it] {'loss': 1.3394, 'grad_norm': 0.0012750277578015227, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:57<25:54, 3.92s/it] 24%|██▍ | 124/520 [08:00<25:39, 3.89s/it] {'loss': 1.282, 'grad_norm': 0.0013476728159163378, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [08:00<25:39, 3.89s/it] 24%|██▍ | 125/520 [08:04<25:32, 3.88s/it] {'loss': 1.2773, 'grad_norm': 0.0013188149547455424, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:04<25:32, 3.88s/it] 24%|██▍ | 126/520 [08:09<26:46, 4.08s/it] {'loss': 
1.2669, 'grad_norm': 0.0011392762718435189, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:09<26:46, 4.08s/it] 24%|██▍ | 127/520 [08:13<26:16, 4.01s/it] {'loss': 1.2616, 'grad_norm': 0.0015635861658342505, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:13<26:16, 4.01s/it] 25%|██▍ | 128/520 [08:16<25:52, 3.96s/it] {'loss': 1.3057, 'grad_norm': 0.0015121115639527096, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:16<25:52, 3.96s/it] 25%|██▍ | 129/520 [08:20<25:35, 3.93s/it] {'loss': 1.2626, 'grad_norm': 0.0012615111202772104, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:20<25:35, 3.93s/it] 25%|██▌ | 130/520 [08:24<25:01, 3.85s/it] {'loss': 1.2916, 'grad_norm': 0.0011253920258738309, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:24<25:01, 3.85s/it] 25%|██▌ | 131/520 [08:28<24:35, 3.79s/it] {'loss': 1.2401, 'grad_norm': 0.0011811196831042522, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:28<24:35, 3.79s/it] 25%|██▌ | 132/520 [08:31<24:20, 3.76s/it] {'loss': 1.3454, 'grad_norm': 0.0013786307798249612, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:31<24:20, 3.76s/it] 26%|██▌ | 133/520 [08:35<24:04, 3.73s/it] {'loss': 1.2588, 'grad_norm': 0.0014708633601023698, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:35<24:04, 3.73s/it] 26%|██▌ | 134/520 [08:39<23:59, 3.73s/it] {'loss': 1.3385, 'grad_norm': 0.001291938943299433, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:39<23:59, 3.73s/it] 26%|██▌ | 135/520 [08:42<23:48, 3.71s/it] {'loss': 1.3879, 'grad_norm': 0.0012961166932180814, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:42<23:48, 3.71s/it] 26%|██▌ | 136/520 [08:46<23:38, 3.69s/it] {'loss': 1.3314, 'grad_norm': 0.0013826957197402218, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:46<23:38, 3.69s/it] 26%|██▋ | 137/520 [08:50<23:48, 3.73s/it] {'loss': 1.2502, 'grad_norm': 0.001603399180458339, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:50<23:48, 3.73s/it] 27%|██▋ | 138/520 [08:54<24:01, 3.77s/it] {'loss': 1.2607, 'grad_norm': 0.0011839387941493687, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:54<24:01, 3.77s/it] 27%|██▋ | 139/520 [08:58<24:08, 3.80s/it] {'loss': 1.1481, 'grad_norm': 0.0013387575304649444, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:58<24:08, 3.80s/it] 27%|██▋ | 140/520 [09:01<24:11, 3.82s/it] {'loss': 1.2897, 'grad_norm': 0.0012503331927539198, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:01<24:11, 3.82s/it] 27%|██▋ | 141/520 [09:05<24:13, 3.84s/it] {'loss': 1.3694, 'grad_norm': 0.0012168199979464378, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:05<24:13, 3.84s/it] 27%|██▋ | 142/520 [09:09<24:02, 3.82s/it] {'loss': 1.2977, 'grad_norm': 0.0011460483249513347, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:09<24:02, 3.82s/it] 28%|██▊ | 143/520 [09:13<24:01, 3.82s/it] {'loss': 1.2891, 'grad_norm': 0.0014500973482970973, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:13<24:01, 3.82s/it] 28%|██▊ | 144/520 [09:17<23:43, 3.78s/it] {'loss': 1.254, 'grad_norm': 0.0013463119653469027, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 
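A quick consistency check on the 'epoch' column: this run has 520 optimizer steps per epoch, so step 144 sits at 144/520 ≈ 0.2769, which the trainer rounds to the 0.28 logged above.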
144/520 [09:17<23:43, 3.78s/it] 28%|██▊ | 145/520 [09:20<23:28, 3.76s/it] {'loss': 1.1915, 'grad_norm': 0.0011513829937273158, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:20<23:28, 3.76s/it] 28%|██▊ | 146/520 [09:24<23:16, 3.73s/it] {'loss': 1.3621, 'grad_norm': 0.0012374153228474384, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:24<23:16, 3.73s/it] 28%|██▊ | 147/520 [09:28<23:09, 3.72s/it] {'loss': 1.2383, 'grad_norm': 0.0013536281084720312, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:28<23:09, 3.72s/it] 28%|██▊ | 148/520 [09:31<23:16, 3.75s/it] {'loss': 1.266, 'grad_norm': 0.00121760875900097, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:31<23:16, 3.75s/it] 29%|██▊ | 149/520 [09:35<23:33, 3.81s/it] {'loss': 1.2076, 'grad_norm': 0.0012793075981164242, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:35<23:33, 3.81s/it] 29%|██▉ | 150/520 [09:39<23:47, 3.86s/it] {'loss': 1.4375, 'grad_norm': 0.001312182551738493, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:39<23:47, 3.86s/it] 29%|██▉ | 151/520 [09:43<23:52, 3.88s/it] {'loss': 1.2495, 'grad_norm': 0.0011981965333240272, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:43<23:52, 3.88s/it] 29%|██▉ | 152/520 [09:47<23:52, 3.89s/it] {'loss': 1.2267, 'grad_norm': 0.0013300293184345102, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:47<23:52, 3.89s/it] 29%|██▉ | 153/520 [09:51<23:48, 3.89s/it] {'loss': 1.2557, 'grad_norm': 0.0012277857589189564, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:51<23:48, 3.89s/it] 30%|██▉ | 154/520 [09:55<23:37, 3.87s/it] {'loss': 1.3407, 'grad_norm': 0.0012604727314380678, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:55<23:37, 3.87s/it] 30%|██▉ | 155/520 [09:59<23:11, 3.81s/it] {'loss': 1.2538, 'grad_norm': 0.0012391237404018865, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:59<23:11, 3.81s/it] 30%|███ | 156/520 [10:02<22:57, 3.78s/it] {'loss': 1.2757, 'grad_norm': 0.0013870078881749338, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [10:02<22:57, 3.78s/it] 30%|███ | 157/520 [10:06<22:41, 3.75s/it] {'loss': 1.3547, 'grad_norm': 0.0013301490816681394, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [10:06<22:41, 3.75s/it] 30%|███ | 158/520 [10:10<22:26, 3.72s/it] {'loss': 1.2595, 'grad_norm': 0.0013764610580672034, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:10<22:26, 3.72s/it] 31%|███ | 159/520 [10:13<22:19, 3.71s/it] {'loss': 1.3047, 'grad_norm': 0.0012440008622764914, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:13<22:19, 3.71s/it] 31%|███ | 160/520 [10:17<22:18, 3.72s/it] {'loss': 1.3111, 'grad_norm': 0.001326115198382121, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:17<22:18, 3.72s/it] 31%|███ | 161/520 [10:21<22:13, 3.71s/it] {'loss': 1.2933, 'grad_norm': 0.0013423178874815821, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:21<22:13, 3.71s/it] 31%|███ | 162/520 [10:25<22:17, 3.74s/it] {'loss': 1.2889, 'grad_norm': 0.0012654712082924551, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:25<22:17, 3.74s/it] 31%|███▏ | 163/520 [10:28<22:20, 3.76s/it] {'loss': 1.1832, 'grad_norm': 
0.0015589750536959737, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:28<22:20, 3.76s/it] 32%|███▏ | 164/520 [10:32<22:20, 3.77s/it] {'loss': 1.1525, 'grad_norm': 0.0012102657910765986, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:32<22:20, 3.77s/it] 32%|███▏ | 165/520 [10:36<22:21, 3.78s/it] {'loss': 1.2944, 'grad_norm': 0.001152276875830449, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:36<22:21, 3.78s/it] 32%|███▏ | 166/520 [10:40<22:16, 3.78s/it] {'loss': 1.2739, 'grad_norm': 0.0013673611391790462, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:40<22:16, 3.78s/it] 32%|███▏ | 167/520 [10:44<22:15, 3.78s/it] {'loss': 1.2681, 'grad_norm': 0.0014202628588283013, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:44<22:15, 3.78s/it] 32%|███▏ | 168/520 [10:47<22:15, 3.80s/it] {'loss': 1.2081, 'grad_norm': 0.001296133166811382, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:47<22:15, 3.80s/it] 32%|███▎ | 169/520 [10:51<22:12, 3.80s/it] {'loss': 1.2865, 'grad_norm': 0.0012868206998162828, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:51<22:12, 3.80s/it] 33%|███▎ | 170/520 [10:55<22:09, 3.80s/it] {'loss': 1.2362, 'grad_norm': 0.0011630842526514306, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:55<22:09, 3.80s/it] 33%|███▎ | 171/520 [10:59<22:09, 3.81s/it] {'loss': 1.2146, 'grad_norm': 0.0012766975882271391, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:59<22:09, 3.81s/it] 33%|███▎ | 172/520 [11:03<22:05, 3.81s/it] {'loss': 1.2886, 'grad_norm': 0.001316999890139306, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:03<22:05, 3.81s/it] 33%|███▎ | 173/520 [11:06<22:00, 3.81s/it] {'loss': 1.2352, 'grad_norm': 0.001250978845800271, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:06<22:00, 3.81s/it] 33%|███▎ | 174/520 [11:10<21:56, 3.80s/it] {'loss': 1.2835, 'grad_norm': 0.001318834029665262, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:10<21:56, 3.80s/it] 34%|███▎ | 175/520 [11:14<21:54, 3.81s/it] {'loss': 1.1939, 'grad_norm': 0.0011464475986404452, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:14<21:54, 3.81s/it] 34%|███▍ | 176/520 [11:18<22:05, 3.85s/it] {'loss': 1.3195, 'grad_norm': 0.0012454820200554285, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:18<22:05, 3.85s/it] 34%|███▍ | 177/520 [11:22<22:31, 3.94s/it] {'loss': 1.1903, 'grad_norm': 0.0012842310036487602, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:22<22:31, 3.94s/it] 34%|███▍ | 178/520 [11:26<22:48, 4.00s/it] {'loss': 1.2635, 'grad_norm': 0.0013873462598502475, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:26<22:48, 4.00s/it] 34%|███▍ | 179/520 [11:30<22:56, 4.04s/it] {'loss': 1.3432, 'grad_norm': 0.0012352087417034606, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:30<22:56, 4.04s/it] 35%|███▍ | 180/520 [11:35<23:01, 4.06s/it] {'loss': 1.2673, 'grad_norm': 0.0013404315155555381, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:35<23:01, 4.06s/it] 35%|███▍ | 181/520 [11:38<22:38, 4.01s/it] {'loss': 1.2312, 'grad_norm': 0.0011776024586213968, 'learning_rate': 0.15161062738075068, 'epoch': 
0.35} + 35%|███▍ | 181/520 [11:38<22:38, 4.01s/it] 35%|███▌ | 182/520 [11:42<22:11, 3.94s/it] {'loss': 1.2461, 'grad_norm': 0.0012536610244145857, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:42<22:11, 3.94s/it] 35%|███▌ | 183/520 [11:46<21:53, 3.90s/it] {'loss': 1.2714, 'grad_norm': 0.0011906002474594622, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:46<21:53, 3.90s/it] 35%|███▌ | 184/520 [11:50<21:41, 3.87s/it] {'loss': 1.2041, 'grad_norm': 0.0012586637014129064, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:50<21:41, 3.87s/it] 36%|███▌ | 185/520 [11:54<21:30, 3.85s/it] {'loss': 1.3472, 'grad_norm': 0.001235683522320004, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:54<21:30, 3.85s/it] 36%|███▌ | 186/520 [11:57<21:21, 3.84s/it] {'loss': 1.2245, 'grad_norm': 0.0013100588326722452, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:57<21:21, 3.84s/it] 36%|███▌ | 187/520 [12:01<21:08, 3.81s/it] {'loss': 1.2266, 'grad_norm': 0.0014172139316424553, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:01<21:08, 3.81s/it] 36%|███▌ | 188/520 [12:05<20:48, 3.76s/it] {'loss': 1.3063, 'grad_norm': 0.0013025687241724445, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:05<20:48, 3.76s/it] 36%|███▋ | 189/520 [12:09<20:36, 3.74s/it] {'loss': 1.3143, 'grad_norm': 0.0011381278930901646, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:09<20:36, 3.74s/it] 37%|███▋ | 190/520 [12:12<20:25, 3.71s/it] {'loss': 1.2366, 'grad_norm': 0.0013338339456391347, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:12<20:25, 3.71s/it] 37%|███▋ | 191/520 [12:16<20:15, 3.70s/it] {'loss': 1.1906, 'grad_norm': 0.0010901900663482997, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:16<20:15, 3.70s/it] 37%|███▋ | 192/520 [12:20<20:12, 3.70s/it] {'loss': 1.2718, 'grad_norm': 0.001182234319879706, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:20<20:12, 3.70s/it] 37%|███▋ | 193/520 [12:23<20:04, 3.68s/it] {'loss': 1.2351, 'grad_norm': 0.00146205880652085, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:23<20:04, 3.68s/it] 37%|███▋ | 194/520 [12:27<20:18, 3.74s/it] {'loss': 1.1339, 'grad_norm': 0.0013531585270907755, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:27<20:18, 3.74s/it] 38%|███▊ | 195/520 [12:31<20:25, 3.77s/it] {'loss': 1.2919, 'grad_norm': 0.0012238073862903048, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:31<20:25, 3.77s/it] 38%|███▊ | 196/520 [12:35<20:31, 3.80s/it] {'loss': 1.2678, 'grad_norm': 0.001297774498293076, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:35<20:31, 3.80s/it] 38%|███▊ | 197/520 [12:38<20:15, 3.76s/it] {'loss': 1.2156, 'grad_norm': 0.0011994581959775333, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:38<20:15, 3.76s/it] 38%|███▊ | 198/520 [12:42<20:02, 3.73s/it] {'loss': 1.2916, 'grad_norm': 0.0013391875153636185, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:42<20:02, 3.73s/it] 38%|███▊ | 199/520 [12:46<19:52, 3.72s/it] {'loss': 1.203, 'grad_norm': 0.0012656702896104506, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:46<19:52, 3.72s/it] 38%|███▊ | 200/520 
[12:49<19:45, 3.70s/it] {'loss': 1.1839, 'grad_norm': 0.0013845130344885444, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:49<19:45, 3.70s/it] 39%|███▊ | 201/520 [12:53<19:41, 3.70s/it] {'loss': 1.213, 'grad_norm': 0.001076258415832637, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:53<19:41, 3.70s/it] 39%|███▉ | 202/520 [12:57<19:34, 3.69s/it] {'loss': 1.2052, 'grad_norm': 0.0011931537669140134, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:57<19:34, 3.69s/it] 39%|███▉ | 203/520 [13:01<19:47, 3.75s/it] {'loss': 1.2607, 'grad_norm': 0.0012778713650804232, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:01<19:47, 3.75s/it] 39%|███▉ | 204/520 [13:04<19:41, 3.74s/it] {'loss': 1.2727, 'grad_norm': 0.0012846887614715, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:04<19:41, 3.74s/it] 39%|███▉ | 205/520 [13:08<19:30, 3.71s/it] {'loss': 1.2096, 'grad_norm': 0.001267003096405923, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:08<19:30, 3.71s/it] 40%|███▉ | 206/520 [13:12<19:23, 3.71s/it] {'loss': 1.3114, 'grad_norm': 0.0012125434289076969, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:12<19:23, 3.71s/it] 40%|███▉ | 207/520 [13:16<19:28, 3.73s/it] {'loss': 1.186, 'grad_norm': 0.001218779907156212, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:16<19:28, 3.73s/it] 40%|████ | 208/520 [13:19<19:33, 3.76s/it] {'loss': 1.2997, 'grad_norm': 0.0013437411865938967, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:19<19:33, 3.76s/it] 40%|████ | 209/520 [13:23<19:34, 3.78s/it] {'loss': 1.2147, 'grad_norm': 0.0012251388923840915, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:23<19:34, 3.78s/it] 40%|████ | 210/520 [13:27<19:36, 3.80s/it] {'loss': 1.2838, 'grad_norm': 0.0013544459188126671, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:27<19:36, 3.80s/it] 41%|████ | 211/520 [13:31<19:26, 3.78s/it] {'loss': 1.2851, 'grad_norm': 0.00114832018292921, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:31<19:26, 3.78s/it] 41%|████ | 212/520 [13:34<19:09, 3.73s/it] {'loss': 1.2789, 'grad_norm': 0.0012422339657923304, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:34<19:09, 3.73s/it] 41%|████ | 213/520 [13:38<19:04, 3.73s/it] {'loss': 1.2347, 'grad_norm': 0.0014149632200237707, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:38<19:04, 3.73s/it] 41%|████ | 214/520 [13:42<18:51, 3.70s/it] {'loss': 1.2223, 'grad_norm': 0.0012408838508741488, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:42<18:51, 3.70s/it] 41%|████▏ | 215/520 [13:45<18:45, 3.69s/it] {'loss': 1.1418, 'grad_norm': 0.0011424996101841026, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:45<18:45, 3.69s/it] 42%|████▏ | 216/520 [13:49<18:40, 3.69s/it] {'loss': 1.143, 'grad_norm': 0.0011676384748367493, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:49<18:40, 3.69s/it] 42%|████▏ | 217/520 [13:53<18:34, 3.68s/it] {'loss': 1.2664, 'grad_norm': 0.0012515760911376696, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:53<18:34, 3.68s/it] 42%|████▏ | 218/520 [13:56<18:32, 3.69s/it] {'loss': 1.2572, 'grad_norm': 0.0012547400122439737, 
'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:56<18:32, 3.69s/it] 42%|████▏ | 219/520 [14:00<18:26, 3.68s/it] {'loss': 1.2532, 'grad_norm': 0.0010909909368099673, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:00<18:26, 3.68s/it] 42%|████▏ | 220/520 [14:04<18:21, 3.67s/it] {'loss': 1.1902, 'grad_norm': 0.0011625768214008191, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:04<18:21, 3.67s/it] 42%|████▎ | 221/520 [14:07<18:17, 3.67s/it] {'loss': 1.2591, 'grad_norm': 0.00128393515472464, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:07<18:17, 3.67s/it] 43%|████▎ | 222/520 [14:11<18:11, 3.66s/it] {'loss': 1.1942, 'grad_norm': 0.0012426441063829335, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:11<18:11, 3.66s/it] 43%|████▎ | 223/520 [14:15<18:24, 3.72s/it] {'loss': 1.1891, 'grad_norm': 0.0011324373140428217, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:15<18:24, 3.72s/it] 43%|████▎ | 224/520 [14:19<18:36, 3.77s/it] {'loss': 1.2602, 'grad_norm': 0.001129204390823737, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:19<18:36, 3.77s/it] 43%|████▎ | 225/520 [14:23<18:41, 3.80s/it] {'loss': 1.1946, 'grad_norm': 0.001223718856279806, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:23<18:41, 3.80s/it] 43%|████▎ | 226/520 [14:27<18:40, 3.81s/it] {'loss': 1.2959, 'grad_norm': 0.0011756784131931157, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:27<18:40, 3.81s/it] 44%|████▎ | 227/520 [14:30<18:40, 3.82s/it] {'loss': 1.2787, 'grad_norm': 0.0011054623148163611, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:30<18:40, 3.82s/it] 44%|████▍ | 228/520 [14:34<18:42, 3.84s/it] {'loss': 1.2987, 'grad_norm': 0.0013145229145431502, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:34<18:42, 3.84s/it] 44%|████▍ | 229/520 [14:38<18:44, 3.87s/it] {'loss': 1.253, 'grad_norm': 0.0011021599572719333, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:38<18:44, 3.87s/it] 44%|████▍ | 230/520 [14:42<18:42, 3.87s/it] {'loss': 1.1418, 'grad_norm': 0.001166765945667424, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:42<18:42, 3.87s/it] 44%|████▍ | 231/520 [14:46<18:38, 3.87s/it] {'loss': 1.2087, 'grad_norm': 0.001110096778582624, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:46<18:38, 3.87s/it] 45%|████▍ | 232/520 [14:50<18:37, 3.88s/it] {'loss': 1.3337, 'grad_norm': 0.0013378333573043784, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:50<18:37, 3.88s/it] 45%|████▍ | 233/520 [14:54<18:32, 3.88s/it] {'loss': 1.2248, 'grad_norm': 0.0013780878977997315, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:54<18:32, 3.88s/it] 45%|████▌ | 234/520 [14:58<18:29, 3.88s/it] {'loss': 1.1586, 'grad_norm': 0.001332077125312456, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:58<18:29, 3.88s/it] 45%|████▌ | 235/520 [15:01<18:26, 3.88s/it] {'loss': 1.2142, 'grad_norm': 0.0012296287977977384, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:01<18:26, 3.88s/it] 45%|████▌ | 236/520 [15:05<18:21, 3.88s/it] {'loss': 1.2861, 'grad_norm': 0.0011163284278848465, 'learning_rate': 0.11981461431993977, 
'epoch': 0.45} + 45%|████▌ | 236/520 [15:05<18:21, 3.88s/it] 46%|████▌ | 237/520 [15:09<18:17, 3.88s/it] {'loss': 1.2865, 'grad_norm': 0.001229486750199703, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:09<18:17, 3.88s/it] 46%|████▌ | 238/520 [15:13<18:14, 3.88s/it] {'loss': 1.2199, 'grad_norm': 0.0013073714232101983, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:13<18:14, 3.88s/it] 46%|████▌ | 239/520 [15:17<18:12, 3.89s/it] {'loss': 1.2948, 'grad_norm': 0.0012356286486207072, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:17<18:12, 3.89s/it] 46%|████▌ | 240/520 [15:21<18:07, 3.88s/it] {'loss': 1.1011, 'grad_norm': 0.0012861651617400232, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:21<18:07, 3.88s/it] 46%|████▋ | 241/520 [15:25<18:03, 3.88s/it] {'loss': 1.1888, 'grad_norm': 0.001216752430753774, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:25<18:03, 3.88s/it] 47%|████▋ | 242/520 [15:29<18:01, 3.89s/it] {'loss': 1.2042, 'grad_norm': 0.0011114010991743676, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:29<18:01, 3.89s/it] 47%|████▋ | 243/520 [15:33<18:00, 3.90s/it] {'loss': 1.1961, 'grad_norm': 0.0011759370709284501, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:33<18:00, 3.90s/it] 47%|████▋ | 244/520 [15:36<17:53, 3.89s/it] {'loss': 1.3081, 'grad_norm': 0.0011955337356142305, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:36<17:53, 3.89s/it] 47%|████▋ | 245/520 [15:40<17:47, 3.88s/it] {'loss': 1.1754, 'grad_norm': 0.0012012007104600002, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:40<17:47, 3.88s/it] 47%|████▋ | 246/520 [15:44<17:45, 3.89s/it] {'loss': 1.3152, 'grad_norm': 0.0012250435077314206, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:44<17:45, 3.89s/it] 48%|████▊ | 247/520 [15:48<17:40, 3.89s/it] {'loss': 1.3543, 'grad_norm': 0.001216218442043296, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:48<17:40, 3.89s/it] 48%|████▊ | 248/520 [15:52<17:31, 3.87s/it] {'loss': 1.1808, 'grad_norm': 0.0012199088247571664, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:52<17:31, 3.87s/it] 48%|████▊ | 249/520 [15:56<17:12, 3.81s/it] {'loss': 1.2704, 'grad_norm': 0.0011595562799230704, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:56<17:12, 3.81s/it] 48%|████▊ | 250/520 [15:59<16:58, 3.77s/it] {'loss': 1.2085, 'grad_norm': 0.0012937159099300703, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:59<16:58, 3.77s/it] 48%|████▊ | 251/520 [16:03<16:44, 3.73s/it] {'loss': 1.2749, 'grad_norm': 0.001240671814431177, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:03<16:44, 3.73s/it] 48%|████▊ | 252/520 [16:07<16:33, 3.71s/it] {'loss': 1.2229, 'grad_norm': 0.0011623428563213023, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:07<16:33, 3.71s/it] 49%|████▊ | 253/520 [16:10<16:29, 3.71s/it] {'loss': 1.2739, 'grad_norm': 0.001419834030456478, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:10<16:29, 3.71s/it] 49%|████▉ | 254/520 [16:14<16:20, 3.69s/it] {'loss': 1.2092, 'grad_norm': 0.0011293164027801424, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 
254/520 [16:14<16:20, 3.69s/it] 49%|████▉ | 255/520 [16:18<16:14, 3.68s/it] {'loss': 1.2085, 'grad_norm': 0.0013592902528387087, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:18<16:14, 3.68s/it] 49%|████▉ | 256/520 [16:21<16:08, 3.67s/it] {'loss': 1.2627, 'grad_norm': 0.0012898656740606587, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:21<16:08, 3.67s/it] 49%|████▉ | 257/520 [16:25<16:01, 3.65s/it] {'loss': 1.2405, 'grad_norm': 0.0012177671577590472, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:25<16:01, 3.65s/it] 50%|████▉ | 258/520 [16:29<15:56, 3.65s/it] {'loss': 1.2513, 'grad_norm': 0.0011205419108943294, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:29<15:56, 3.65s/it] 50%|████▉ | 259/520 [16:32<15:53, 3.65s/it] {'loss': 1.3284, 'grad_norm': 0.0014613969161043048, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:32<15:53, 3.65s/it] 50%|█████ | 260/520 [16:36<15:48, 3.65s/it] {'loss': 1.2639, 'grad_norm': 0.001232405178573809, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:36<15:48, 3.65s/it] 50%|█████ | 261/520 [16:39<15:46, 3.65s/it] {'loss': 1.2111, 'grad_norm': 0.0012084130295836244, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:39<15:46, 3.65s/it] 50%|█████ | 262/520 [16:43<15:42, 3.65s/it] {'loss': 1.182, 'grad_norm': 0.0012052467239929453, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:43<15:42, 3.65s/it] 51%|█████ | 263/520 [16:47<15:40, 3.66s/it] {'loss': 1.2326, 'grad_norm': 0.001174044846902134, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:47<15:40, 3.66s/it] 51%|█████ | 264/520 [16:50<15:38, 3.66s/it] {'loss': 1.2764, 'grad_norm': 0.0011587866782833088, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:50<15:38, 3.66s/it] 51%|█████ | 265/520 [16:54<15:35, 3.67s/it] {'loss': 1.1932, 'grad_norm': 0.0012499865386011609, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:54<15:35, 3.67s/it] 51%|█████ | 266/520 [16:58<15:33, 3.68s/it] {'loss': 1.067, 'grad_norm': 0.0011482981619873197, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:58<15:33, 3.68s/it] 51%|█████▏ | 267/520 [17:02<15:29, 3.67s/it] {'loss': 1.1928, 'grad_norm': 0.0012115594490471938, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:02<15:29, 3.67s/it] 52%|█████▏ | 268/520 [17:05<15:28, 3.68s/it] {'loss': 1.3382, 'grad_norm': 0.0015381313164529887, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:05<15:28, 3.68s/it] 52%|█████▏ | 269/520 [17:09<15:20, 3.67s/it] {'loss': 1.2908, 'grad_norm': 0.0012447423774620393, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:09<15:20, 3.67s/it] 52%|█████▏ | 270/520 [17:13<15:18, 3.67s/it] {'loss': 1.1603, 'grad_norm': 0.001121844090539472, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:13<15:18, 3.67s/it] 52%|█████▏ | 271/520 [17:16<15:13, 3.67s/it] {'loss': 1.2806, 'grad_norm': 0.0012064024279936107, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:16<15:13, 3.67s/it] 52%|█████▏ | 272/520 [17:20<15:18, 3.71s/it] {'loss': 1.1778, 'grad_norm': 0.001195757508319323, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:20<15:18, 3.71s/it] 52%|█████▎ | 
273/520 [17:24<15:28, 3.76s/it] {'loss': 1.2905, 'grad_norm': 0.0011629995108146482, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:24<15:28, 3.76s/it] 53%|█████▎ | 274/520 [17:28<15:36, 3.81s/it] {'loss': 1.2545, 'grad_norm': 0.0013931679958835625, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:28<15:36, 3.81s/it] 53%|█████▎ | 275/520 [17:32<15:40, 3.84s/it] {'loss': 1.2005, 'grad_norm': 0.0013474512748953016, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:32<15:40, 3.84s/it] 53%|█████▎ | 276/520 [17:36<15:43, 3.87s/it] {'loss': 1.2659, 'grad_norm': 0.0014643587399653301, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:36<15:43, 3.87s/it] 53%|█████▎ | 277/520 [17:40<15:45, 3.89s/it] {'loss': 1.2937, 'grad_norm': 0.0011024485270624683, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:40<15:45, 3.89s/it] 53%|█████▎ | 278/520 [17:44<15:45, 3.91s/it] {'loss': 1.1502, 'grad_norm': 0.0010526015285050954, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:44<15:45, 3.91s/it] 54%|█████▎ | 279/520 [17:47<15:42, 3.91s/it] {'loss': 1.1729, 'grad_norm': 0.001231216303264621, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:47<15:42, 3.91s/it] 54%|█████▍ | 280/520 [17:51<15:31, 3.88s/it] {'loss': 1.1968, 'grad_norm': 0.001426622348437595, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:51<15:31, 3.88s/it] 54%|█████▍ | 281/520 [17:55<15:14, 3.83s/it] {'loss': 1.2984, 'grad_norm': 0.0012679251432392085, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:55<15:14, 3.83s/it] 54%|█████▍ | 282/520 [17:59<15:01, 3.79s/it] {'loss': 1.1624, 'grad_norm': 0.0011020769692964537, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:59<15:01, 3.79s/it] 54%|█████▍ | 283/520 [18:02<14:50, 3.76s/it] {'loss': 1.3065, 'grad_norm': 0.0012721092905537267, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:02<14:50, 3.76s/it] 55%|█████▍ | 284/520 [18:06<14:43, 3.74s/it] {'loss': 1.1786, 'grad_norm': 0.0013526066544796583, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:06<14:43, 3.74s/it] 55%|█████▍ | 285/520 [18:10<14:35, 3.72s/it] {'loss': 1.1873, 'grad_norm': 0.0012163467774456752, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:10<14:35, 3.72s/it] 55%|█████▌ | 286/520 [18:13<14:30, 3.72s/it] {'loss': 1.0711, 'grad_norm': 0.0012493221851560855, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:13<14:30, 3.72s/it] 55%|█████▌ | 287/520 [18:17<14:22, 3.70s/it] {'loss': 1.2959, 'grad_norm': 0.0012264392011719577, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:17<14:22, 3.70s/it] 55%|█████▌ | 288/520 [18:21<14:19, 3.70s/it] {'loss': 1.3285, 'grad_norm': 0.0011538306407846185, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:21<14:19, 3.70s/it] 56%|█████▌ | 289/520 [18:25<14:14, 3.70s/it] {'loss': 1.2006, 'grad_norm': 0.001158708165595777, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:25<14:14, 3.70s/it] 56%|█████▌ | 290/520 [18:28<14:11, 3.70s/it] {'loss': 1.1275, 'grad_norm': 0.001157990345462503, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:28<14:11, 3.70s/it] 
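The learning-rate column follows a cosine decay from the 0.2 peak: 0.1 · (1 + cos(π·(t − 16)/504)) matches the logged values (0.15 at step 184 and 0.1 at step 268 above, 0.05 at step 352 below), which suggests 16 warmup steps out of the 520 total; the warmup count is inferred from the values, not stated anywhere in this log. A sketch of the check:

import math

TOTAL_STEPS = 520
WARMUP_STEPS = 16  # inferred from the logged values, not read from a config
PEAK_LR = 0.2

def cosine_lr(step):
    # Linear warmup, then cosine decay from PEAK_LR toward 0.
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return 0.5 * PEAK_LR * (1.0 + math.cos(math.pi * progress))

# Spot checks against the log: 184 -> 0.15, 268 -> 0.1, 352 -> 0.05
for step in (184, 268, 352):
    print(step, cosine_lr(step))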
56%|█████▌ | 291/520 [18:32<14:05, 3.69s/it] {'loss': 1.1719, 'grad_norm': 0.001144203344045015, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:32<14:05, 3.69s/it] 56%|█████▌ | 292/520 [18:36<14:02, 3.70s/it] {'loss': 1.2277, 'grad_norm': 0.0011730556785746011, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:36<14:02, 3.70s/it] 56%|█████▋ | 293/520 [18:39<13:58, 3.70s/it] {'loss': 1.1738, 'grad_norm': 0.0012408273737417707, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:39<13:58, 3.70s/it] 57%|█████▋ | 294/520 [18:43<13:56, 3.70s/it] {'loss': 1.195, 'grad_norm': 0.0013250693277668515, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:43<13:56, 3.70s/it] 57%|█████▋ | 295/520 [18:47<13:54, 3.71s/it] {'loss': 1.218, 'grad_norm': 0.001254419956623443, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:47<13:54, 3.71s/it] 57%|█████▋ | 296/520 [18:50<13:50, 3.71s/it] {'loss': 1.1461, 'grad_norm': 0.001339634604040829, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:50<13:50, 3.71s/it] 57%|█████▋ | 297/520 [18:54<13:43, 3.69s/it] {'loss': 1.2715, 'grad_norm': 0.0013173807409288655, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:54<13:43, 3.69s/it] 57%|█████▋ | 298/520 [18:58<13:39, 3.69s/it] {'loss': 1.238, 'grad_norm': 0.0011413691700890398, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:58<13:39, 3.69s/it] 57%|█████▊ | 299/520 [19:01<13:34, 3.69s/it] {'loss': 1.2521, 'grad_norm': 0.0011302701630572381, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:01<13:34, 3.69s/it] 58%|█████▊ | 300/520 [19:05<13:31, 3.69s/it] {'loss': 1.2861, 'grad_norm': 0.0011703864740214168, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:05<13:31, 3.69s/it] 58%|█████▊ | 301/520 [19:09<13:30, 3.70s/it] {'loss': 1.2627, 'grad_norm': 0.001140894381944889, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:09<13:30, 3.70s/it] 58%|█████▊ | 302/520 [19:13<13:25, 3.70s/it] {'loss': 1.2662, 'grad_norm': 0.0012564404291949525, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:13<13:25, 3.70s/it] 58%|█████▊ | 303/520 [19:16<13:19, 3.68s/it] {'loss': 1.1919, 'grad_norm': 0.001322887206220897, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:16<13:19, 3.68s/it] 58%|█████▊ | 304/520 [19:20<13:13, 3.67s/it] {'loss': 1.1662, 'grad_norm': 0.0013443394162727849, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:20<13:13, 3.67s/it] 59%|█████▊ | 305/520 [19:24<13:08, 3.67s/it] {'loss': 1.2946, 'grad_norm': 0.0013061580650261364, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:24<13:08, 3.67s/it] 59%|█████▉ | 306/520 [19:27<13:03, 3.66s/it] {'loss': 1.2414, 'grad_norm': 0.0011934311142914986, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:27<13:03, 3.66s/it] 59%|█████▉ | 307/520 [19:31<12:59, 3.66s/it] {'loss': 1.1783, 'grad_norm': 0.0010940756359680201, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:31<12:59, 3.66s/it] 59%|█████▉ | 308/520 [19:34<12:54, 3.65s/it] {'loss': 1.2994, 'grad_norm': 0.0013100169228594707, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:34<12:54, 3.65s/it] 
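The progress-bar timings are self-consistent: tqdm's ETA is remaining steps times the current rate, so at step 308 above, (520 − 308) × 3.65 s ≈ 774 s ≈ 12:54, matching the bar's estimate.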
59%|█████▉ | 309/520 [19:38<13:10, 3.74s/it] {'loss': 1.1816, 'grad_norm': 0.0011445008043885993, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:38<13:10, 3.74s/it] 60%|█████▉ | 310/520 [19:42<12:59, 3.71s/it] {'loss': 1.1636, 'grad_norm': 0.0012074822750165468, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:42<12:59, 3.71s/it] 60%|█████▉ | 311/520 [19:46<12:56, 3.72s/it] {'loss': 1.1353, 'grad_norm': 0.0012560509190391957, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:46<12:56, 3.72s/it] 60%|██████ | 312/520 [19:49<12:49, 3.70s/it] {'loss': 1.1271, 'grad_norm': 0.0013237999285905064, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:49<12:49, 3.70s/it] 60%|██████ | 313/520 [19:53<12:44, 3.69s/it] {'loss': 1.1161, 'grad_norm': 0.0010737611868639964, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:53<12:44, 3.69s/it] 60%|██████ | 314/520 [19:57<12:59, 3.78s/it] {'loss': 1.1557, 'grad_norm': 0.0010975817921946313, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:57<12:59, 3.78s/it] 61%|██████ | 315/520 [20:01<12:47, 3.74s/it] {'loss': 1.2143, 'grad_norm': 0.0015937454993527464, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [20:01<12:47, 3.74s/it] 61%|██████ | 316/520 [20:05<13:05, 3.85s/it] {'loss': 1.1336, 'grad_norm': 0.0012934908850520288, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [20:05<13:05, 3.85s/it] 61%|██████ | 317/520 [20:09<12:49, 3.79s/it] {'loss': 1.1471, 'grad_norm': 0.0010615821520253233, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:09<12:49, 3.79s/it] 61%|██████ | 318/520 [20:12<12:38, 3.76s/it] {'loss': 1.2582, 'grad_norm': 0.0013252733397837464, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:12<12:38, 3.76s/it] 61%|██████▏ | 319/520 [20:16<13:00, 3.88s/it] {'loss': 1.1338, 'grad_norm': 0.0010925016088604309, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:16<13:00, 3.88s/it] 62%|██████▏ | 320/520 [20:20<12:43, 3.82s/it] {'loss': 1.0815, 'grad_norm': 0.00120897398695758, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:20<12:43, 3.82s/it] 62%|██████▏ | 321/520 [20:24<12:29, 3.77s/it] {'loss': 1.2777, 'grad_norm': 0.0013247512488843312, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:24<12:29, 3.77s/it] 62%|██████▏ | 322/520 [20:27<12:20, 3.74s/it] {'loss': 1.1123, 'grad_norm': 0.0011576805995901483, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:27<12:20, 3.74s/it] 62%|██████▏ | 323/520 [20:31<12:11, 3.72s/it] {'loss': 1.1821, 'grad_norm': 0.0011410395802253102, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:31<12:11, 3.72s/it] 62%|██████▏ | 324/520 [20:35<12:04, 3.70s/it] {'loss': 1.2117, 'grad_norm': 0.0011903958463270162, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:35<12:04, 3.70s/it] 62%|██████▎ | 325/520 [20:38<11:59, 3.69s/it] {'loss': 1.2155, 'grad_norm': 0.0011988183290458697, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:38<11:59, 3.69s/it] 63%|██████▎ | 326/520 [20:42<11:52, 3.67s/it] {'loss': 1.2131, 'grad_norm': 0.0012182513390457, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 
[20:42<11:52, 3.67s/it] 63%|██████▎ | 327/520 [20:46<11:46, 3.66s/it] {'loss': 1.2217, 'grad_norm': 0.0013091114280685756, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:46<11:46, 3.66s/it] 63%|██████▎ | 328/520 [20:49<11:41, 3.65s/it] {'loss': 1.2591, 'grad_norm': 0.001219980982400191, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:49<11:41, 3.65s/it] 63%|██████▎ | 329/520 [20:53<11:36, 3.64s/it] {'loss': 1.1362, 'grad_norm': 0.0010456293506806418, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:53<11:36, 3.64s/it] 63%|██████▎ | 330/520 [20:57<11:32, 3.64s/it] {'loss': 1.2081, 'grad_norm': 0.001095502005525369, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:57<11:32, 3.64s/it] 64%|██████▎ | 331/520 [21:00<11:27, 3.64s/it] {'loss': 1.1688, 'grad_norm': 0.0011272820303890464, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:00<11:27, 3.64s/it] 64%|██████▍ | 332/520 [21:04<11:24, 3.64s/it] {'loss': 1.2496, 'grad_norm': 0.001097614549415654, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:04<11:24, 3.64s/it] 64%|██████▍ | 333/520 [21:07<11:20, 3.64s/it] {'loss': 1.3072, 'grad_norm': 0.0012467656040029487, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:07<11:20, 3.64s/it] 64%|██████▍ | 334/520 [21:11<11:16, 3.64s/it] {'loss': 1.2173, 'grad_norm': 0.001237073169410237, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:11<11:16, 3.64s/it] 64%|██████▍ | 335/520 [21:15<11:11, 3.63s/it] {'loss': 1.2178, 'grad_norm': 0.0010909050687011171, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:15<11:11, 3.63s/it] 65%|██████▍ | 336/520 [21:18<11:11, 3.65s/it] {'loss': 1.1178, 'grad_norm': 0.0012846797792807046, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:18<11:11, 3.65s/it] 65%|██████▍ | 337/520 [21:22<11:08, 3.65s/it] {'loss': 1.1006, 'grad_norm': 0.001127576494640259, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:22<11:08, 3.65s/it] 65%|██████▌ | 338/520 [21:26<11:04, 3.65s/it] {'loss': 1.2142, 'grad_norm': 0.001146956825347931, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:26<11:04, 3.65s/it] 65%|██████▌ | 339/520 [21:29<10:59, 3.65s/it] {'loss': 1.1648, 'grad_norm': 0.0011446775904201702, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:29<10:59, 3.65s/it] 65%|██████▌ | 340/520 [21:33<10:56, 3.65s/it] {'loss': 1.1571, 'grad_norm': 0.0011961956650500827, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:33<10:56, 3.65s/it] 66%|██████▌ | 341/520 [21:37<10:53, 3.65s/it] {'loss': 1.1797, 'grad_norm': 0.0012444426394920887, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:37<10:53, 3.65s/it] 66%|██████▌ | 342/520 [21:40<10:48, 3.64s/it] {'loss': 1.2209, 'grad_norm': 0.001480013512977157, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:40<10:48, 3.64s/it] 66%|██████▌ | 343/520 [21:44<10:44, 3.64s/it] {'loss': 1.1731, 'grad_norm': 0.0010868434863883828, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:44<10:44, 3.64s/it] 66%|██████▌ | 344/520 [21:48<10:42, 3.65s/it] {'loss': 1.1357, 'grad_norm': 0.0011168967977530733, 'learning_rate': 
0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:48<10:42, 3.65s/it] 66%|██████▋ | 345/520 [21:51<10:38, 3.65s/it] {'loss': 1.2448, 'grad_norm': 0.0012339556959054076, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:51<10:38, 3.65s/it] 67%|██████▋ | 346/520 [21:55<10:34, 3.65s/it] {'loss': 1.1916, 'grad_norm': 0.0011157242131319294, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:55<10:34, 3.65s/it] 67%|██████▋ | 347/520 [21:59<10:32, 3.65s/it] {'loss': 1.1472, 'grad_norm': 0.0010647041290066678, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:59<10:32, 3.65s/it]
Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 67%|██████▋ | 348/520 [22:02<10:28, 3.65s/it] {'loss': 1.1111, 'grad_norm': 0.0013991127871961647, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:02<10:28, 3.65s/it] 67%|██████▋ | 349/520 [22:06<10:25, 3.66s/it] {'loss': 1.1466, 'grad_norm': 0.0011475311876222498, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:06<10:25, 3.66s/it] 67%|██████▋ | 350/520 [22:09<10:21, 3.66s/it] {'loss': 1.1894, 'grad_norm': 0.0012439950213261216, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:09<10:21, 3.66s/it] 68%|██████▊ | 351/520 [22:13<10:17, 3.66s/it] {'loss': 1.0993, 'grad_norm': 0.0010676368781346026, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:13<10:17, 3.66s/it] 68%|██████▊ | 352/520 [22:17<10:15, 3.66s/it] {'loss': 1.2217, 'grad_norm': 0.0011271719636404569, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:17<10:15, 3.66s/it] 68%|██████▊ | 353/520 [22:21<10:13, 3.67s/it] {'loss': 1.1476, 'grad_norm': 0.0009622325675219948, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:21<10:13, 3.67s/it] 68%|██████▊ | 354/520 [22:24<10:18, 3.73s/it] {'loss': 1.262, 'grad_norm': 0.0010915923876581492, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:24<10:18, 3.73s/it] 68%|██████▊ | 355/520 [22:28<10:23, 3.78s/it] {'loss': 1.1579, 'grad_norm': 0.0011559942364851162, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:28<10:23, 3.78s/it] 68%|██████▊ | 356/520 [22:32<10:26, 3.82s/it] {'loss': 1.1626, 'grad_norm': 0.0011898956632178119, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:32<10:26, 3.82s/it] 69%|██████▊ | 357/520 [22:36<10:29, 3.86s/it] {'loss': 1.1905, 'grad_norm': 0.0011360623036584321, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:36<10:29, 3.86s/it] 69%|██████▉ | 358/520 [22:40<10:26, 3.87s/it] {'loss': 1.1275, 'grad_norm': 0.0011055888600664276, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:40<10:26, 3.87s/it] 69%|██████▉ | 359/520 [22:44<10:13, 3.81s/it] {'loss': 1.1945, 'grad_norm': 0.001228328209919184, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:44<10:13, 3.81s/it] 69%|██████▉ | 360/520 [22:47<10:01, 3.76s/it] {'loss': 1.2033, 'grad_norm': 0.0011796572111641443, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:47<10:01, 3.76s/it] 69%|██████▉ | 361/520 [22:51<09:52, 3.73s/it] {'loss': 1.2126, 'grad_norm': 
0.0010807617995487932, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:51<09:52, 3.73s/it] 70%|██████▉ | 362/520 [22:55<09:45, 3.71s/it] {'loss': 1.1766, 'grad_norm': 0.001210394968013839, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:55<09:45, 3.71s/it] 70%|██████▉ | 363/520 [22:58<09:40, 3.70s/it] {'loss': 1.1982, 'grad_norm': 0.0011542078641945748, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:58<09:40, 3.70s/it] 70%|███████ | 364/520 [23:02<09:37, 3.70s/it] {'loss': 1.2369, 'grad_norm': 0.0011596724209296358, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [23:02<09:37, 3.70s/it] 70%|███████ | 365/520 [23:06<09:32, 3.70s/it] {'loss': 1.2569, 'grad_norm': 0.0012265297731573305, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:06<09:32, 3.70s/it] 70%|███████ | 366/520 [23:09<09:29, 3.69s/it] {'loss': 1.2136, 'grad_norm': 0.001208885576549105, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:09<09:29, 3.69s/it] 71%|███████ | 367/520 [23:13<09:24, 3.69s/it] {'loss': 1.2151, 'grad_norm': 0.0011644415185949547, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:13<09:24, 3.69s/it] 71%|███████ | 368/520 [23:17<09:19, 3.68s/it] {'loss': 1.0668, 'grad_norm': 0.0013339856428793526, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:17<09:19, 3.68s/it] 71%|███████ | 369/520 [23:20<09:14, 3.67s/it] {'loss': 1.1889, 'grad_norm': 0.0010430877547395257, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:20<09:14, 3.67s/it] 71%|███████ | 370/520 [23:24<09:11, 3.68s/it] {'loss': 1.1307, 'grad_norm': 0.0011087372197280225, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:24<09:11, 3.68s/it] 71%|███████▏ | 371/520 [23:28<09:07, 3.68s/it] {'loss': 1.1296, 'grad_norm': 0.0012129063643935964, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:28<09:07, 3.68s/it] 72%|███████▏ | 372/520 [23:31<09:04, 3.68s/it] {'loss': 1.2624, 'grad_norm': 0.001121289732178229, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:31<09:04, 3.68s/it] 72%|███████▏ | 373/520 [23:35<09:01, 3.69s/it] {'loss': 1.152, 'grad_norm': 0.0012161446474568984, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:35<09:01, 3.69s/it] 72%|███████▏ | 374/520 [23:39<08:57, 3.68s/it] {'loss': 1.2122, 'grad_norm': 0.0011493775434634165, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:39<08:57, 3.68s/it] 72%|███████▏ | 375/520 [23:43<08:55, 3.69s/it] {'loss': 1.1326, 'grad_norm': 0.0011417665678936965, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:43<08:55, 3.69s/it] 72%|███████▏ | 376/520 [23:46<08:52, 3.70s/it] {'loss': 1.2446, 'grad_norm': 0.0011110980158178633, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:46<08:52, 3.70s/it] 72%|███████▎ | 377/520 [23:50<08:50, 3.71s/it] {'loss': 1.1761, 'grad_norm': 0.0011617869243330157, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:50<08:50, 3.71s/it] 73%|███████▎ | 378/520 [23:54<08:44, 3.70s/it] {'loss': 1.2342, 'grad_norm': 0.0011213781097913684, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:54<08:44, 3.70s/it] 73%|███████▎ | 
379/520 [23:57<08:41, 3.70s/it] {'loss': 1.2125, 'grad_norm': 0.0010806484585435998, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:57<08:41, 3.70s/it] 73%|███████▎ | 380/520 [24:01<08:37, 3.70s/it] {'loss': 1.2352, 'grad_norm': 0.0011314797546330883, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:01<08:37, 3.70s/it] 73%|███████▎ | 381/520 [24:05<08:34, 3.70s/it] {'loss': 1.2141, 'grad_norm': 0.001101360509030721, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:05<08:34, 3.70s/it] 73%|███████▎ | 382/520 [24:09<08:32, 3.71s/it] {'loss': 1.2006, 'grad_norm': 0.0011445122532921957, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:09<08:32, 3.71s/it] 74%|███████▎ | 383/520 [24:12<08:32, 3.74s/it] {'loss': 1.0514, 'grad_norm': 0.001217395768546695, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:12<08:32, 3.74s/it] 74%|███████▍ | 384/520 [24:16<08:28, 3.74s/it] {'loss': 1.2459, 'grad_norm': 0.0010833176704813175, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:16<08:28, 3.74s/it] 74%|███████▍ | 385/520 [24:20<08:26, 3.76s/it] {'loss': 1.1917, 'grad_norm': 0.001053007447535414, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:20<08:26, 3.76s/it] 74%|███████▍ | 386/520 [24:24<08:23, 3.76s/it] {'loss': 1.147, 'grad_norm': 0.0010116507744154992, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:24<08:23, 3.76s/it] 74%|███████▍ | 387/520 [24:27<08:18, 3.75s/it] {'loss': 1.2639, 'grad_norm': 0.0011354855766577028, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:27<08:18, 3.75s/it] 75%|███████▍ | 388/520 [24:31<08:12, 3.73s/it] {'loss': 1.0994, 'grad_norm': 0.0010664057879898243, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:31<08:12, 3.73s/it] 75%|███████▍ | 389/520 [24:35<08:06, 3.72s/it] {'loss': 1.1492, 'grad_norm': 0.0013034555289970391, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:35<08:06, 3.72s/it] 75%|███████▌ | 390/520 [24:38<08:02, 3.71s/it] {'loss': 1.2119, 'grad_norm': 0.0010815944857062282, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:38<08:02, 3.71s/it] 75%|███████▌ | 391/520 [24:42<07:58, 3.71s/it] {'loss': 1.2855, 'grad_norm': 0.0011790107739760322, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:42<07:58, 3.71s/it] 75%|███████▌ | 392/520 [24:46<07:53, 3.70s/it] {'loss': 1.1055, 'grad_norm': 0.0012193061361869177, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:46<07:53, 3.70s/it] 76%|███████▌ | 393/520 [24:49<07:48, 3.69s/it] {'loss': 1.1049, 'grad_norm': 0.0009681715105152718, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:49<07:48, 3.69s/it] 76%|███████▌ | 394/520 [24:53<07:43, 3.68s/it] {'loss': 1.1689, 'grad_norm': 0.0012019823614870934, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:53<07:43, 3.68s/it] 76%|███████▌ | 395/520 [24:57<07:40, 3.68s/it] {'loss': 1.1318, 'grad_norm': 0.001213335256638543, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:57<07:40, 3.68s/it] 76%|███████▌ | 396/520 [25:00<07:34, 3.67s/it] {'loss': 1.2144, 'grad_norm': 0.0012431439997968954, 'learning_rate': 
0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:00<07:34, 3.67s/it] 76%|███████▋ | 397/520 [25:04<07:34, 3.69s/it] {'loss': 1.1922, 'grad_norm': 0.0010926487421484892, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:04<07:34, 3.69s/it] 77%|███████▋ | 398/520 [25:08<07:33, 3.72s/it] {'loss': 1.1952, 'grad_norm': 0.0011907223908882846, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:08<07:33, 3.72s/it] 77%|███████▋ | 399/520 [25:12<07:31, 3.73s/it] {'loss': 1.1464, 'grad_norm': 0.001104617048590185, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:12<07:31, 3.73s/it] 77%|███████▋ | 400/520 [25:16<07:29, 3.75s/it] {'loss': 1.1791, 'grad_norm': 0.0010285611186142774, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:16<07:29, 3.75s/it] 77%|███████▋ | 401/520 [25:19<07:27, 3.76s/it] {'loss': 1.0257, 'grad_norm': 0.0012215811991945213, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:19<07:27, 3.76s/it] 77%|███████▋ | 402/520 [25:23<07:23, 3.76s/it] {'loss': 1.1483, 'grad_norm': 0.0011446933206583315, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:23<07:23, 3.76s/it] 78%|███████▊ | 403/520 [25:27<07:20, 3.77s/it] {'loss': 1.1762, 'grad_norm': 0.0012548893748153148, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:27<07:20, 3.77s/it] 78%|███████▊ | 404/520 [25:31<07:17, 3.77s/it] {'loss': 1.0831, 'grad_norm': 0.0013719088739921445, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:31<07:17, 3.77s/it] 78%|███████▊ | 405/520 [25:34<07:13, 3.77s/it] {'loss': 1.1575, 'grad_norm': 0.0011292821245528664, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:34<07:13, 3.77s/it] 78%|███████▊ | 406/520 [25:38<07:09, 3.77s/it] {'loss': 1.0755, 'grad_norm': 0.0013731120030914044, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:38<07:09, 3.77s/it] 78%|███████▊ | 407/520 [25:42<07:06, 3.77s/it] {'loss': 1.2546, 'grad_norm': 0.001149994840216536, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:42<07:06, 3.77s/it] 78%|███████▊ | 408/520 [25:46<07:00, 3.75s/it] {'loss': 1.1667, 'grad_norm': 0.001383199707769941, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:46<07:00, 3.75s/it] 79%|███████▊ | 409/520 [25:49<06:53, 3.73s/it] {'loss': 1.2782, 'grad_norm': 0.0012152258318347932, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:49<06:53, 3.73s/it] 79%|███████▉ | 410/520 [25:53<06:47, 3.70s/it] {'loss': 1.0165, 'grad_norm': 0.0011684776601303987, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:53<06:47, 3.70s/it] 79%|███████▉ | 411/520 [25:57<06:41, 3.69s/it] {'loss': 1.2642, 'grad_norm': 0.0013533204218363292, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:57<06:41, 3.69s/it] 79%|███████▉ | 412/520 [26:00<06:36, 3.67s/it] {'loss': 1.1706, 'grad_norm': 0.001166774056648583, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:00<06:36, 3.67s/it] 79%|███████▉ | 413/520 [26:04<06:32, 3.67s/it] {'loss': 1.169, 'grad_norm': 0.0011620489303086217, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:04<06:32, 3.67s/it] 80%|███████▉ | 414/520 
[26:08<06:30, 3.69s/it] {'loss': 0.9809, 'grad_norm': 0.0009461874632081732, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:08<06:30, 3.69s/it] 80%|███████▉ | 415/520 [26:11<06:26, 3.68s/it] {'loss': 1.1502, 'grad_norm': 0.0010682865437931727, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:11<06:26, 3.68s/it] 80%|████████ | 416/520 [26:15<06:27, 3.73s/it] {'loss': 1.0652, 'grad_norm': 0.001227604786699372, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:15<06:27, 3.73s/it] 80%|████████ | 417/520 [26:19<06:25, 3.74s/it] {'loss': 1.2244, 'grad_norm': 0.0012239991199463893, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:19<06:25, 3.74s/it] 80%|████████ | 418/520 [26:23<06:21, 3.74s/it] {'loss': 1.2148, 'grad_norm': 0.0011545392714014523, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:23<06:21, 3.74s/it] 81%|████████ | 419/520 [26:27<06:21, 3.77s/it] {'loss': 1.2065, 'grad_norm': 0.0012555988513732646, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:27<06:21, 3.77s/it] 81%|████████ | 420/520 [26:31<06:26, 3.87s/it] {'loss': 1.0953, 'grad_norm': 0.0012340619006430051, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:31<06:26, 3.87s/it] 81%|████████ | 421/520 [26:35<06:31, 3.95s/it] {'loss': 1.0356, 'grad_norm': 0.0014760098535257409, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:35<06:31, 3.95s/it] 81%|████████ | 422/520 [26:39<06:31, 3.99s/it] {'loss': 1.149, 'grad_norm': 0.0011609881703830538, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:39<06:31, 3.99s/it] 81%|████████▏ | 423/520 [26:43<06:22, 3.94s/it] {'loss': 1.1323, 'grad_norm': 0.0012695819297037388, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:43<06:22, 3.94s/it] 82%|████████▏ | 424/520 [26:47<06:15, 3.91s/it] {'loss': 1.2536, 'grad_norm': 0.001157396976956923, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:47<06:15, 3.91s/it] 82%|████████▏ | 425/520 [26:50<06:06, 3.86s/it] {'loss': 1.1485, 'grad_norm': 0.0011474242428923525, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:50<06:06, 3.86s/it] 82%|████████▏ | 426/520 [26:54<05:56, 3.79s/it] {'loss': 1.1641, 'grad_norm': 0.0014059672938082355, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:54<05:56, 3.79s/it] 82%|████████▏ | 427/520 [26:58<05:49, 3.75s/it] {'loss': 1.0809, 'grad_norm': 0.001097402696370786, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:58<05:49, 3.75s/it] 82%|████████▏ | 428/520 [27:01<05:42, 3.72s/it] {'loss': 1.0622, 'grad_norm': 0.0011892535715106371, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:01<05:42, 3.72s/it] 82%|████████▎ | 429/520 [27:05<05:38, 3.72s/it] {'loss': 1.1562, 'grad_norm': 0.0012486113337000391, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:05<05:38, 3.72s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:09<05:32, 3.70s/it] {'loss': 1.1603, 'grad_norm': 0.0010569990691148435, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:09<05:32, 3.70s/it] 83%|████████▎ | 431/520 [27:12<05:29, 3.70s/it] {'loss': 1.1385, 'grad_norm': 0.0012801522618314124, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:12<05:29, 3.70s/it] 83%|████████▎ | 432/520 [27:16<05:27, 3.73s/it] {'loss': 1.0704, 'grad_norm': 0.0011704235214831668, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:16<05:27, 3.73s/it] 83%|████████▎ | 433/520 [27:20<05:21, 3.70s/it] {'loss': 1.1989, 'grad_norm': 0.001111659606338189, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:20<05:21, 3.70s/it] 83%|████████▎ | 434/520 [27:23<05:20, 3.73s/it] {'loss': 0.9499, 'grad_norm': 0.0011578901620234619, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:23<05:20, 3.73s/it] 84%|████████▎ | 435/520 [27:27<05:16, 3.72s/it] {'loss': 1.2378, 'grad_norm': 0.001286765252493834, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:27<05:16, 3.72s/it] 84%|████████▍ | 436/520 [27:31<05:11, 3.71s/it] {'loss': 1.04, 'grad_norm': 0.0011653175714775987, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:31<05:11, 3.71s/it] 84%|████████▍ | 437/520 [27:35<05:06, 3.69s/it] {'loss': 1.2557, 'grad_norm': 0.0012044837565365436, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:35<05:06, 3.69s/it] 84%|████████▍ | 438/520 [27:38<05:02, 3.69s/it] {'loss': 1.0769, 'grad_norm': 0.0011129444413580194, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:38<05:02, 3.69s/it] 84%|████████▍ | 439/520 [27:42<04:58, 3.68s/it] {'loss': 1.1234, 'grad_norm': 0.0009584241941155133, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:42<04:58, 3.68s/it] 85%|████████▍ | 440/520 [27:46<04:55, 3.69s/it] {'loss': 1.1124, 'grad_norm': 0.001139803888178798, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:46<04:55, 3.69s/it] 85%|████████▍ | 441/520 [27:49<04:53, 3.71s/it] {'loss': 1.1376, 'grad_norm': 0.0011401607414467509, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:49<04:53, 3.71s/it] 85%|████████▌ | 442/520 [27:53<04:48, 3.70s/it] {'loss': 1.1744, 'grad_norm': 0.0012570645518631114, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:53<04:48, 3.70s/it] 85%|████████▌ | 443/520 [27:57<04:43, 3.68s/it] {'loss': 1.1888, 'grad_norm': 0.0011297906075763279, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:57<04:43, 3.68s/it] 85%|████████▌ | 444/520 [28:00<04:43, 3.72s/it] {'loss': 1.1532, 'grad_norm': 0.0010278411391881279, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:00<04:43, 3.72s/it] 86%|████████▌ | 445/520 [28:04<04:42, 3.76s/it] {'loss': 1.0818, 'grad_norm': 0.001093002280857359, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:04<04:42, 3.76s/it] 86%|████████▌ | 446/520 [28:08<04:38, 3.76s/it] {'loss': 1.2152, 'grad_norm': 0.001061363283143557, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:08<04:38, 3.76s/it] 
86%|████████▌ | 447/520 [28:12<04:32, 3.73s/it] {'loss': 1.1604, 'grad_norm': 0.0011514789112824577, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:12<04:32, 3.73s/it] 86%|████████▌ | 448/520 [28:15<04:26, 3.70s/it] {'loss': 1.1529, 'grad_norm': 0.0011918234913137378, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:15<04:26, 3.70s/it] 86%|████████▋ | 449/520 [28:19<04:21, 3.69s/it] {'loss': 1.1684, 'grad_norm': 0.0011507220192863332, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:19<04:21, 3.69s/it] 87%|████████▋ | 450/520 [28:23<04:17, 3.67s/it] {'loss': 1.1793, 'grad_norm': 0.0011441682065455852, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:23<04:17, 3.67s/it] 87%|████████▋ | 451/520 [28:26<04:13, 3.67s/it] {'loss': 1.1771, 'grad_norm': 0.0011412248146249967, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:26<04:13, 3.67s/it] 87%|████████▋ | 452/520 [28:30<04:09, 3.66s/it] {'loss': 1.213, 'grad_norm': 0.0010481315696866742, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:30<04:09, 3.66s/it] 87%|████████▋ | 453/520 [28:34<04:04, 3.66s/it] {'loss': 1.1898, 'grad_norm': 0.0011553593132876362, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:34<04:04, 3.66s/it] 87%|████████▋ | 454/520 [28:37<04:02, 3.67s/it] {'loss': 1.0884, 'grad_norm': 0.001184181649497642, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:37<04:02, 3.67s/it] 88%|████████▊ | 455/520 [28:41<03:58, 3.67s/it] {'loss': 1.2276, 'grad_norm': 0.0011125899803120485, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:41<03:58, 3.67s/it] 88%|████████▊ | 456/520 [28:45<03:55, 3.67s/it] {'loss': 1.1524, 'grad_norm': 0.0011437032665783415, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:45<03:55, 3.67s/it] 88%|████████▊ | 457/520 [28:48<03:52, 3.68s/it] {'loss': 1.1003, 'grad_norm': 0.0009916619554168297, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:48<03:52, 3.68s/it] 88%|████████▊ | 458/520 [28:52<03:48, 3.68s/it] {'loss': 1.2801, 'grad_norm': 0.001226894735465372, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:52<03:48, 3.68s/it] 88%|████████▊ | 459/520 [28:56<03:44, 3.69s/it] {'loss': 1.2144, 'grad_norm': 0.001252959785981582, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:56<03:44, 3.69s/it] 88%|████████▊ | 460/520 [29:00<03:44, 3.74s/it] {'loss': 1.1019, 'grad_norm': 0.001143396697815341, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:00<03:44, 3.74s/it] 89%|████████▊ | 461/520 [29:04<03:43, 3.79s/it] {'loss': 1.1792, 'grad_norm': 0.000929480269541264, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:04<03:43, 3.79s/it] 89%|████████▉ | 462/520 [29:07<03:41, 3.82s/it] {'loss': 1.2586, 'grad_norm': 0.0010892230860562579, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:07<03:41, 3.82s/it] 89%|████████▉ | 463/520 [29:11<03:36, 3.79s/it] {'loss': 1.0589, 'grad_norm': 0.0011793099066080143, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:11<03:36, 3.79s/it] 89%|████████▉ | 464/520 [29:15<03:30, 3.76s/it] {'loss': 1.1949, 
'grad_norm': 0.0012734299065924706, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:15<03:30, 3.76s/it] 89%|████████▉ | 465/520 [29:19<03:25, 3.74s/it] {'loss': 1.2992, 'grad_norm': 0.0012145423159922277, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:19<03:25, 3.74s/it] 90%|████████▉ | 466/520 [29:22<03:21, 3.73s/it] {'loss': 1.1853, 'grad_norm': 0.0010734791704367845, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:22<03:21, 3.73s/it] 90%|████████▉ | 467/520 [29:26<03:16, 3.72s/it] {'loss': 1.1479, 'grad_norm': 0.001045416095260575, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:26<03:16, 3.72s/it] 90%|█████████ | 468/520 [29:30<03:13, 3.72s/it] {'loss': 1.1612, 'grad_norm': 0.0012720470705869184, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:30<03:13, 3.72s/it] 90%|█████████ | 469/520 [29:33<03:09, 3.71s/it] {'loss': 1.2217, 'grad_norm': 0.0012754571454053857, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:33<03:09, 3.71s/it] 90%|█████████ | 470/520 [29:37<03:04, 3.70s/it] {'loss': 1.1024, 'grad_norm': 0.0010755614902405383, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:37<03:04, 3.70s/it] 91%|█████████ | 471/520 [29:41<03:01, 3.71s/it] {'loss': 1.1258, 'grad_norm': 0.001197630885520623, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:41<03:01, 3.71s/it] 91%|█████████ | 472/520 [29:44<02:57, 3.70s/it] {'loss': 1.0917, 'grad_norm': 0.0011616424983459004, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:44<02:57, 3.70s/it] 91%|█████████ | 473/520 [29:48<02:54, 3.71s/it] {'loss': 1.1559, 'grad_norm': 0.0012115604146889633, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:48<02:54, 3.71s/it] 91%|█████████ | 474/520 [29:52<02:50, 3.70s/it] {'loss': 1.1856, 'grad_norm': 0.0010941558503451754, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:52<02:50, 3.70s/it] 91%|█████████▏| 475/520 [29:56<02:46, 3.70s/it] {'loss': 1.1066, 'grad_norm': 0.0010964923535038958, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:56<02:46, 3.70s/it] 92%|█████████▏| 476/520 [29:59<02:42, 3.70s/it] {'loss': 1.1452, 'grad_norm': 0.0011679238210269736, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:59<02:42, 3.70s/it] 92%|█████████▏| 477/520 [30:03<02:38, 3.68s/it] {'loss': 1.1368, 'grad_norm': 0.0012581885935503232, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:03<02:38, 3.68s/it] 92%|█████████▏| 478/520 [30:07<02:34, 3.67s/it] {'loss': 1.0892, 'grad_norm': 0.0012221782325971815, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:07<02:34, 3.67s/it] 92%|█████████▏| 479/520 [30:10<02:30, 3.67s/it] {'loss': 1.1518, 'grad_norm': 0.0011984946799392625, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:10<02:30, 3.67s/it] 92%|█████████▏| 480/520 [30:14<02:26, 3.66s/it] {'loss': 1.1701, 'grad_norm': 0.0010397030393497794, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:14<02:26, 3.66s/it] 92%|█████████▎| 481/520 [30:17<02:22, 3.67s/it] {'loss': 1.1611, 'grad_norm': 0.0010511254346789486, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:17<02:22, 3.67s/it] 93%|█████████▎| 482/520 [30:21<02:19, 3.67s/it] {'loss': 1.1817, 'grad_norm': 0.0011218236813502096, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:21<02:19, 3.67s/it] 93%|█████████▎| 483/520 [30:25<02:15, 3.66s/it] {'loss': 1.1544, 'grad_norm': 0.0011670338802060405, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:25<02:15, 3.66s/it] 93%|█████████▎| 484/520 [30:28<02:11, 3.66s/it] {'loss': 1.1619, 'grad_norm': 0.001183230252128221, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:28<02:11, 3.66s/it] 93%|█████████▎| 485/520 [30:32<02:08, 3.67s/it] {'loss': 1.1169, 'grad_norm': 0.0011183019279045164, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:32<02:08, 3.67s/it] 93%|█████████▎| 486/520 [30:36<02:04, 3.66s/it] {'loss': 1.2383, 'grad_norm': 0.0012464904776196982, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:36<02:04, 3.66s/it] 94%|█████████▎| 487/520 [30:39<02:00, 3.66s/it] {'loss': 1.0913, 'grad_norm': 0.0011337352678269133, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:39<02:00, 3.66s/it] 94%|█████████▍| 488/520 [30:43<01:56, 3.65s/it] {'loss': 1.039, 'grad_norm': 0.0013007147903675964, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:43<01:56, 3.65s/it] 94%|█████████▍| 489/520 [30:47<01:53, 3.67s/it] {'loss': 1.1826, 'grad_norm': 0.0009513372834016675, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:47<01:53, 3.67s/it] 94%|█████████▍| 490/520 [30:50<01:49, 3.66s/it] {'loss': 1.1555, 'grad_norm': 0.0011675037310258545, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:50<01:49, 3.66s/it] 94%|█████████▍| 491/520 [30:54<01:46, 3.66s/it] {'loss': 1.1206, 'grad_norm': 0.0011784136748744093, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:54<01:46, 3.66s/it] 95%|█████████▍| 492/520 [30:58<01:42, 3.67s/it] {'loss': 1.2325, 'grad_norm': 0.0012133184478857549, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:58<01:42, 3.67s/it] 95%|█████████▍| 493/520 [31:02<01:40, 3.74s/it] {'loss': 1.1799, 'grad_norm': 0.0011103778006272857, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:02<01:40, 3.74s/it] 95%|█████████▌| 494/520 [31:06<01:37, 3.77s/it] {'loss': 1.1728, 'grad_norm': 0.0010559069037696343, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:06<01:37, 3.77s/it] 95%|█████████▌| 495/520 [31:09<01:34, 3.80s/it] {'loss': 1.1425, 'grad_norm': 0.0011987758140991928, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:09<01:34, 3.80s/it] 95%|█████████▌| 496/520 [31:13<01:31, 3.82s/it] {'loss': 1.0599, 'grad_norm': 0.0012365506133635593, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:13<01:31, 3.82s/it] 96%|█████████▌| 497/520 [31:17<01:28, 3.83s/it] {'loss': 1.1126, 'grad_norm': 0.0010188156091118677, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:17<01:28, 3.83s/it] 96%|█████████▌| 498/520 [31:21<01:23, 3.80s/it] {'loss': 1.135, 'grad_norm': 0.0011412304732251151, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:21<01:23, 
3.80s/it] 96%|█████████▌| 499/520 [31:25<01:19, 3.77s/it] {'loss': 1.2489, 'grad_norm': 0.0011966959645179276, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:25<01:19, 3.77s/it] 96%|█████████▌| 500/520 [31:28<01:15, 3.75s/it] {'loss': 1.2551, 'grad_norm': 0.0013369439411396755, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:28<01:15, 3.75s/it] 96%|█████████▋| 501/520 [31:32<01:10, 3.72s/it] {'loss': 1.1554, 'grad_norm': 0.00120376473262181, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:32<01:10, 3.72s/it] 97%|█████████▋| 502/520 [31:36<01:06, 3.70s/it] {'loss': 1.1757, 'grad_norm': 0.0010818075899359286, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:36<01:06, 3.70s/it] 97%|█████████▋| 503/520 [31:39<01:02, 3.70s/it] {'loss': 1.1466, 'grad_norm': 0.0011503208687002552, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:39<01:02, 3.70s/it] 97%|█████████▋| 504/520 [31:43<00:59, 3.69s/it] {'loss': 1.1657, 'grad_norm': 0.0012821283678798607, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:43<00:59, 3.69s/it] 97%|█████████▋| 505/520 [31:47<00:55, 3.71s/it] {'loss': 1.1997, 'grad_norm': 0.0011869355792337008, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:47<00:55, 3.71s/it] 97%|█████████▋| 506/520 [31:50<00:51, 3.70s/it] {'loss': 1.1297, 'grad_norm': 0.0011786661937745004, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:50<00:51, 3.70s/it] 98%|█████████▊| 507/520 [31:54<00:48, 3.71s/it] {'loss': 1.2888, 'grad_norm': 0.001033475057407049, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:54<00:48, 3.71s/it] 98%|█████████▊| 508/520 [31:58<00:44, 3.70s/it] {'loss': 1.2425, 'grad_norm': 0.0011659663359159284, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:58<00:44, 3.70s/it] 98%|█████████▊| 509/520 [32:02<00:41, 3.74s/it] {'loss': 1.2165, 'grad_norm': 0.0011052379624271655, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:02<00:41, 3.74s/it] 98%|█████████▊| 510/520 [32:05<00:37, 3.78s/it] {'loss': 1.1626, 'grad_norm': 0.0011208969419294224, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:05<00:37, 3.78s/it] 98%|█████████▊| 511/520 [32:09<00:34, 3.80s/it] {'loss': 1.1381, 'grad_norm': 0.0011096325414367493, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:09<00:34, 3.80s/it] 98%|█████████▊| 512/520 [32:13<00:30, 3.82s/it] {'loss': 1.0252, 'grad_norm': 0.001243889128526415, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:13<00:30, 3.82s/it] 99%|█████████▊| 513/520 [32:17<00:26, 3.84s/it] {'loss': 1.2202, 'grad_norm': 0.0012758056499075827, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:17<00:26, 3.84s/it] 99%|█████████▉| 514/520 [32:21<00:23, 3.84s/it] {'loss': 1.1889, 'grad_norm': 0.0010619452143052497, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:21<00:23, 3.84s/it] 99%|█████████▉| 515/520 [32:25<00:19, 3.84s/it] {'loss': 1.2383, 'grad_norm': 0.0013199359879395127, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:25<00:19, 3.84s/it] 99%|█████████▉| 516/520 [32:29<00:15, 
3.85s/it] {'loss': 1.1436, 'grad_norm': 0.0011103810898510423, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:29<00:15, 3.85s/it] 99%|█████████▉| 517/520 [32:32<00:11, 3.84s/it] {'loss': 1.1832, 'grad_norm': 0.0010748954187623175, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:32<00:11, 3.84s/it] 100%|█████████▉| 518/520 [32:36<00:07, 3.83s/it] {'loss': 1.1557, 'grad_norm': 0.0011560425263266738, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 100%|█████████▉| 518/520 [32:36<00:07, 3.83s/it] 100%|█████████▉| 519/520 [32:40<00:03, 3.82s/it] {'loss': 1.1531, 'grad_norm': 0.0011178238150500086, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 100%|█████████▉| 519/520 [32:40<00:03, 3.82s/it] 100%|██████████| 520/520 [32:45<00:00, 4.07s/it] {'loss': 1.1522, 'grad_norm': 0.0011160952785941615, 'learning_rate': 0.0, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:45<00:00, 4.07s/it] {'train_runtime': 1965.196, 'train_samples_per_second': 33.854, 'train_steps_per_second': 0.265, 'train_loss': 1.2435237826063084, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:45<00:00, 4.07s/it] 100%|██████████| 520/520 [32:45<00:00, 3.78s/it]
+[2025-10-13 20:55:48,379] [INFO] [launch.py:348:main] Process 1027380 exits successfully.
+[2025-10-13 20:55:48,380] [INFO] [launch.py:348:main] Process 1027378 exits successfully.
+[2025-10-13 20:55:49,381] [INFO] [launch.py:348:main] Process 1027379 exits successfully.
+[2025-10-13 20:55:49,382] [INFO] [launch.py:348:main] Process 1027377 exits successfully.
+[2025-10-13 20:55:49,382] [INFO] [launch.py:348:main] Process 1027376 exits successfully.
+[2025-10-13 20:55:49,383] [INFO] [launch.py:348:main] Process 1027374 exits successfully.
+[2025-10-13 20:55:49,383] [INFO] [launch.py:348:main] Process 1027375 exits successfully.
+[2025-10-13 20:55:54,389] [INFO] [launch.py:348:main] Process 1027373 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.1_2e-1_connector-5.0_2.1_2e-1_ablation_20251013_202134.log
+Timestamp: 2025-10-13 20:55:56
+=====================================
diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation_20251013_205557.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation_20251013_205557.log
new file mode 100644
index 0000000000000000000000000000000000000000..0e7b2e420d06bb931aad303f601c6b0d18e9ef0f
--- /dev/null
+++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation_20251013_205557.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation_20251013_205557.log
+Timestamp: 2025-10-13 20:55:57
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
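[Editor's note: as a quick consistency check on the summary line of the run that just completed above, a hedged back-of-the-envelope, assuming the run consumed 0.1 of the ~665k-sample llava_v1_5_mix665k split:

    runtime = 1965.196         # 'train_runtime' in seconds
    print(runtime * 33.854)    # 'train_samples_per_second' -> ~66,530 samples, i.e. ~0.1 * 665k
    print(runtime * 0.265)     # 'train_steps_per_second'   -> ~520 optimizer steps, as logged

Both figures match the 520-step run recorded above.]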
+ import pynvml # type: ignore[import] +[2025-10-13 20:55:59,689] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 20:56:02,449] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 20:56:02,450] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 2.3 --temperature_mlp_text 2.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 2.3 --temperature_mlp_vision 2.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 2.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
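[Editor's note: the runner cmd above requests --learning_rate 2e-1, --warmup_ratio 0.03 and --lr_scheduler_type cosine, with per-device batch 4, gradient accumulation 4 and 8 ranks, i.e. an effective batch of 128; 0.1 x ~665k samples / 128 is where the 520-step run length comes from. A minimal sketch, assuming the standard Hugging Face cosine-with-warmup shape, reproduces the learning_rate column of the 520-step training log above (the runs share these optimizer settings):

    import math

    base_lr, total_steps = 2e-1, 520
    warmup = math.ceil(total_steps * 0.03)   # 16 warmup steps

    def lr_at(step: int) -> float:
        # linear warmup, then a half-cosine decay to zero
        if step < warmup:
            return base_lr * step / warmup
        progress = (step - warmup) / (total_steps - warmup)
        return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(lr_at(352))   # ~0.05, matching the 0.050000000000000024 logged at step 352/520
    print(lr_at(520))   # 0.0, matching the final step
]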
+ import pynvml # type: ignore[import] +[2025-10-13 20:56:05,085] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 20:56:06,136] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 20:56:06,136] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 20:56:06,136] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 20:56:06,136] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 20:56:06,136] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 20:56:06,136] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 20:56:06,136] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 20:56:06,139] [INFO] [launch.py:253:main] process 1047396 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,141] [INFO] 
[launch.py:253:main] process 1047397 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,143] [INFO] [launch.py:253:main] process 1047398 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,145] [INFO] [launch.py:253:main] process 1047399 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,147] [INFO] [launch.py:253:main] process 1047400 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,151] [INFO] [launch.py:253:main] process 1047401 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,155] [INFO] [launch.py:253:main] process 1047402 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 20:56:06,159] [INFO] [launch.py:253:main] process 1047403 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.3', '--temperature_mlp_text', '2.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.3', '--temperature_mlp_vision', '2.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml  # type: ignore[import]
+[2025-10-13 20:56:12,773] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:12,895] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,122] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,128] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,164] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,164] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,195] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,195] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 20:56:13,197] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,200] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 20:56:13,301] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,524] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,528] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,563] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,563] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,597] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 20:56:13,598] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.3, 'temperature_mlp': 2.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.3, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.3, + "temperature_mlp": 2.3, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1047396:1047396 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047396:1047396 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047396:1047396 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047396:1047396 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047396:1047396 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047396:1047396 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1047399:1047399 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047399:1047399 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047399:1047399 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047399:1047399 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047399:1047399 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047399:1047399 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1047398:1047398 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047398:1047398 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047398:1047398 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047398:1047398 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047398:1047398 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047398:1047398 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047400:1047400 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047400:1047400 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047400:1047400 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047400:1047400 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047400:1047400 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047400:1047400 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1047402:1047402 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047402:1047402 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047402:1047402 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047402:1047402 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047402:1047402 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047402:1047402 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1047397:1047397 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047397:1047397 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047397:1047397 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047397:1047397 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047397:1047397 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047397:1047397 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047403:1047403 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047403:1047403 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047403:1047403 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047403:1047403 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047403:1047403 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047403:1047403 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047401:1047401 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1047401:1047401 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047401:1047401 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047401:1047401 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1047401:1047401 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1047401:1047401 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0>
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO ncclCommInitRank comm 0x557cc9814ee0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO ncclCommInitRank comm 0x5586436d12e0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO ncclCommInitRank comm 0x55cc1d9e2330 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO ncclCommInitRank comm 0x5590e49941c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO ncclCommInitRank comm 0x5623f42b58b0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO ncclCommInitRank comm 0x55d510a528a0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO ncclCommInitRank comm 0x5639ad434dc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO ncclCommInitRank comm 0x560b8b8550f0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xcddceb780728ec82 - Init START +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO NVLS multicast support is not available on dev 7 
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO comm 0x557cc9814ee0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO comm 0x5639ad434dc0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO comm 0x5586436d12e0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO comm 0x560b8b8550f0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO comm 0x5623f42b58b0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO comm 0x55d510a528a0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO comm 0x55cc1d9e2330 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO comm 0x5590e49941c0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL 
INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL 
INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL 
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047403:1049003 [7] NCCL INFO ncclCommInitRank comm 0x557cc9814ee0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047399:1048965 [3] NCCL INFO ncclCommInitRank comm 0x5586436d12e0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047397:1049002 [1] NCCL INFO ncclCommInitRank comm 0x560b8b8550f0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047401:1049004 [5] NCCL INFO ncclCommInitRank comm 0x55cc1d9e2330 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047398:1048982 [2] NCCL INFO ncclCommInitRank comm 0x5623f42b58b0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047402:1048984 [6] NCCL INFO ncclCommInitRank comm 0x5639ad434dc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047400:1048983 [4] NCCL INFO ncclCommInitRank comm 0x55d510a528a0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xcddceb780728ec82 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1047396:1048964 [0] NCCL INFO ncclCommInitRank comm 0x5590e49941c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xcddceb780728ec82 - Init COMPLETE
+[2025-10-13 20:56:57,763] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores',
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 20:56:59,570] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
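The three submodules are restored from separate subdirectories of the pretrain checkpoint: the Qwen2.5 language model and the SigLIP vision tower as regular Hugging Face checkpoints, and the connector as a bare PyTorch state dict. A hedged sketch of the connector step (the helper name and the strict=False choice are illustrative assumptions, not TinyLLaVA's exact API; the path is the one logged above):

import os
import torch

def load_connector(connector, base_dir):
    # <base_dir>/connector/pytorch_model.bin holds the two MLP projection
    # weights; the mask `scores` tensors are absent from it as well, so a
    # non-strict load leaves them at their fresh initialization.
    state = torch.load(os.path.join(base_dir, "connector", "pytorch_model.bin"),
                       map_location="cpu")
    result = connector.load_state_dict(state, strict=False)
    return result.missing_keys, result.unexpected_keys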
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1):
GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: 
Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000 +Pre-training init connector._connector.0.scores: Mean=5.000005 +Pre-training init connector._connector.2.scores: Mean=4.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 20:57:17,855 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 20:57:17,860 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters 
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6]
-1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 
4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
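The `.scores` tensors enumerated above mirror their underlying weight matrices, so every logged count follows from the shapes in the TinyLlavaConfig printed at the top of this log. A quick arithmetic check (plain Python, not part of the logged run):

```python
# Shapes from the config: hidden_size 896, 2 KV heads x 64 head_dim = 128,
# intermediate_size 4864, vision_hidden_size 1152.
hidden, kv_dim, inter, vision = 896, 128, 4864, 1152

assert hidden * hidden == 802_816    # q_proj, o_proj, connector._connector.2
assert hidden * kv_dim == 114_688    # k_proj, v_proj
assert hidden * inter == 4_358_144   # gate_proj, up_proj, down_proj
assert vision * hidden == 1_032_192  # connector._connector.0 (mlp2x_gelu input)

# Per masked decoder layer: 2 attention squares, 2 KV rectangles, 3 MLP matrices.
per_layer = 2 * 802_816 + 2 * 114_688 + 3 * 4_358_144  # 14,909,440 score params
```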
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read
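The masked modules being exercised in this run are the SupermaskLinearSparsity_SoftForward_Normal layers shown in the model dump earlier. A minimal sketch of what such a soft-masked linear layer might compute, assuming sigmoid gating of per-weight scores at the configured temperature of 1.3 (an assumption from the config fields `mask_type: soft` and `temperature_*: 1.3`, not the repo's actual implementation):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Hypothetical soft supermask: trainable `scores` shaped like `weight`
    are squashed to (0, 1) gates that multiply the weights elementwise."""
    def __init__(self, in_features, out_features, bias=True, temperature=1.3):
        super().__init__(in_features, out_features, bias)
        self.temperature = temperature
        self.scores = nn.Parameter(torch.zeros_like(self.weight))  # the logged `.scores`

    def forward(self, x):
        gate = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * gate, self.bias)
```

This also explains why every `.scores` entry above has exactly the same parameter count as the weight matrix it masks.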
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
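The ring/tree and P2P channel lines in this block are NCCL's standard initialization trace for a single 8-rank communicator. A minimal sketch of how such output is typically enabled (assumed torchrun-style launch; this is not taken from this repo):

```python
# NCCL_DEBUG=INFO makes the backend print its ring/tree topology and the
# per-channel P2P connections seen above during communicator init.
import os
import torch.distributed as dist

os.environ.setdefault("NCCL_DEBUG", "INFO")  # emits the INFO lines seen here
dist.init_process_group(backend="nccl")      # 8 ranks -> the 0..7 ring/tree logged above
```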
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1047400:1053902 [4] NCCL INFO ncclCommInitRank comm 0x7f206006aad0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047402:1053906 [6] NCCL INFO ncclCommInitRank comm 0x7ff2e006ab30 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047398:1053904 [2] NCCL INFO ncclCommInitRank comm 0x7f321006a730 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047396:1053899 [0] NCCL INFO ncclCommInitRank comm 0x7fa1d406ab70 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047403:1053901 [7] NCCL INFO ncclCommInitRank comm 0x7efdd006a9f0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047399:1053900 [3] NCCL INFO ncclCommInitRank comm 0x7ff11806aa40 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047401:1053903 [5] NCCL INFO ncclCommInitRank comm 0x7f97d406ab70 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xefa93151fe19e84d - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1047397:1053905 [1] NCCL INFO ncclCommInitRank comm 0x7f616c06aa90 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xefa93151fe19e84d - Init COMPLETE
+ 0%| | 1/520 [00:36<5:19:14, 36.91s/it] {'loss': 2.2096, 'grad_norm': 0.02485358594474978, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 1/520 [00:36<5:19:14, 36.91s/it] 0%| | 2/520 [00:40<2:29:28, 17.31s/it] {'loss': 2.179, 'grad_norm': 0.02639794672578282, 'learning_rate': 0.025, 'epoch': 0.0}
+ 0%| | 2/520 [00:40<2:29:28, 17.31s/it] 1%| | 3/520 [00:44<1:35:17, 11.06s/it] {'loss': 2.3455, 'grad_norm': 0.03014568014721538, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 3/520 [00:44<1:35:17, 11.06s/it] 1%| | 4/520 [00:47<1:09:55, 8.13s/it] {'loss': 1.7469, 'grad_norm': 0.010045162315848732, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 4/520 [00:47<1:09:55, 8.13s/it] 1%| | 5/520 [00:51<55:58, 6.52s/it] {'loss': 1.7247, 'grad_norm': 0.006596412724543657, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 5/520 [00:51<55:58, 6.52s/it] 1%| | 6/520 [00:55<47:28, 5.54s/it] {'loss': 1.4857, 'grad_norm': 0.004883680421681852, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%| | 6/520 [00:55<47:28, 5.54s/it] 1%|▏ | 7/520 [00:58<42:00, 4.91s/it] {'loss': 1.482, 'grad_norm': 0.005808056347038298, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:58<42:00, 4.91s/it] 2%|▏ | 8/520 [01:02<40:04, 4.70s/it] {'loss': 1.5206, 'grad_norm': 0.004385999614280791, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 8/520 [01:02<40:04, 4.70s/it] 2%|▏ | 9/520 [01:06<37:06, 4.36s/it] {'loss': 1.5705, 'grad_norm': 0.0032524556108542192, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [01:06<37:06, 4.36s/it] 2%|▏ | 10/520 [01:10<35:05, 4.13s/it] {'loss': 1.4051, 'grad_norm': 0.0035035351589991926, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [01:10<35:05, 4.13s/it] 2%|▏ | 11/520 [01:14<35:16, 4.16s/it] {'loss': 1.4836, 'grad_norm': 0.003731266842701971, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [01:14<35:16, 4.16s/it] 2%|▏ | 12/520 [01:17<33:46, 3.99s/it] {'loss': 1.3953, 'grad_norm': 0.0029554257264789363, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+ 2%|▏ | 12/520 [01:17<33:46, 3.99s/it][2025-10-13 20:58:45,040] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:22<34:28, 4.08s/it] {'loss': 1.4172, 'grad_norm': 0.002661463927887369, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [01:22<34:28, 4.08s/it] 3%|▎ | 14/520 [01:25<33:12, 3.94s/it] {'loss': 1.4542, 'grad_norm': 0.002585325054469441, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:25<33:12, 3.94s/it] 3%|▎ | 15/520 [01:29<32:20, 3.84s/it] {'loss': 1.4358, 'grad_norm': 0.0021188161046025206, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:29<32:20, 3.84s/it] 3%|▎ | 16/520 [01:33<31:39, 3.77s/it] {'loss': 1.3951, 'grad_norm': 0.0020716135541312395, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:33<31:39, 3.77s/it] 3%|▎ | 17/520 [01:36<31:14, 3.73s/it] {'loss': 1.4869, 'grad_norm': 0.002289809511277903, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:36<31:14, 3.73s/it] 3%|▎ | 18/520 [01:40<30:54, 3.69s/it] {'loss': 1.3396, 'grad_norm': 0.0020620481682484563, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:40<30:54, 3.69s/it] 4%|▎ | 19/520 [01:43<30:41, 3.68s/it] {'loss': 1.3898, 'grad_norm': 0.0018664012482386507, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:43<30:41, 3.68s/it] 4%|▍ | 20/520 [01:47<30:28, 3.66s/it] {'loss': 1.3268, 'grad_norm': 0.0020029758377260462, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:47<30:28, 3.66s/it] 4%|▍ | 21/520 [01:51<30:22, 3.65s/it] {'loss': 1.377, 'grad_norm': 0.0021594614033767084, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:51<30:22, 3.65s/it] 4%|▍ | 22/520 [01:54<30:13, 3.64s/it] {'loss': 1.4783, 'grad_norm': 0.001811149017925863, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:54<30:13, 3.64s/it] 4%|▍ | 23/520 [01:58<30:05, 3.63s/it] {'loss': 1.4141, 'grad_norm': 0.0016772288853989523, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:58<30:05, 3.63s/it] 5%|▍ | 24/520 [02:02<30:04, 3.64s/it] {'loss': 1.3635, 'grad_norm': 0.001664240584567128, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 24/520 [02:02<30:04, 3.64s/it] 5%|▍ | 25/520 [02:05<29:58, 3.63s/it] {'loss': 1.4144, 'grad_norm': 0.001966264711313926, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▍ | 25/520 [02:05<29:58, 3.63s/it] 5%|▌ | 26/520 [02:09<30:01, 3.65s/it] {'loss': 1.399, 'grad_norm': 0.0017235789427053887, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 26/520 [02:09<30:01, 3.65s/it] 5%|▌ | 27/520 [02:13<30:05, 3.66s/it] {'loss': 1.3136, 'grad_norm': 0.0019522527004131182, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 27/520 [02:13<30:05, 3.66s/it] 5%|▌ | 28/520 [02:16<29:56, 3.65s/it] {'loss': 1.3201, 'grad_norm': 0.001793020972982802, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 5%|▌ | 28/520 [02:16<29:56, 3.65s/it] 6%|▌ | 29/520 [02:20<29:55, 3.66s/it] {'loss': 1.3479, 'grad_norm': 0.0020063443208629015, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 29/520 [02:20<29:55, 3.66s/it] 6%|▌ | 30/520 [02:24<29:47, 3.65s/it] {'loss': 1.4381, 'grad_norm': 0.0016358587721122192, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:24<29:47, 3.65s/it] 6%|▌ | 31/520 [02:27<29:42, 3.65s/it] {'loss': 1.3214, 'grad_norm': 0.0015667279836828578, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
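The learning_rate column above is consistent with linear warmup over 16 steps to the 2e-1 peak named in the experiment tag, followed by cosine decay over the remaining steps. A small sketch that reproduces the logged values (assumed schedule, matching the standard transformers warmup-plus-cosine shape; not taken from the repo):

```python
import math

def lr(step, base=0.2, warmup=16, total=520):
    """Linear warmup to `base`, then cosine decay to 0 (assumed schedule)."""
    if step <= warmup:
        return base * step / warmup
    progress = (step - warmup) / (total - warmup)
    return base * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr(1))   # 0.0125             -> matches step 1 above
print(lr(17))  # 0.19999805729...   -> matches step 17 above
print(lr(20))  # 0.19996891820...   -> matches step 20 above
```

The stage3 warning logged at step 12 also names a concrete mitigation; a hedged sketch of where such a call could go in a custom loop (`get_accelerator` is DeepSpeed's accelerator abstraction; the placement is an assumption):

```python
from deepspeed.accelerator import get_accelerator

# Call at the same point on every rank, e.g. right after the optimizer step,
# so all ranks flush their allocator caches together as the warning suggests.
get_accelerator().empty_cache()
```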
+ 6%|▌ | 31/520 [02:27<29:42, 3.65s/it] 6%|▌ | 32/520 [02:31<29:41, 3.65s/it] {'loss': 1.2971, 'grad_norm': 0.0019391702189680664, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:31<29:41, 3.65s/it] 6%|▋ | 33/520 [02:34<29:39, 3.65s/it] {'loss': 1.326, 'grad_norm': 0.001553208873250366, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:34<29:39, 3.65s/it] 7%|▋ | 34/520 [02:38<29:29, 3.64s/it] {'loss': 1.3199, 'grad_norm': 0.0016976603184296433, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 34/520 [02:38<29:29, 3.64s/it] 7%|▋ | 35/520 [02:42<29:28, 3.65s/it] {'loss': 1.3273, 'grad_norm': 0.0020096652504682597, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:42<29:28, 3.65s/it] 7%|▋ | 36/520 [02:45<29:25, 3.65s/it] {'loss': 1.4246, 'grad_norm': 0.0016036117821842737, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:45<29:25, 3.65s/it] 7%|▋ | 37/520 [02:49<29:17, 3.64s/it] {'loss': 1.4138, 'grad_norm': 0.0014961713839357997, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:49<29:17, 3.64s/it] 7%|▋ | 38/520 [02:53<29:14, 3.64s/it] {'loss': 1.4956, 'grad_norm': 0.0015453474646583854, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:53<29:14, 3.64s/it] 8%|▊ | 39/520 [02:56<29:05, 3.63s/it] {'loss': 1.35, 'grad_norm': 0.0019013099557165635, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:56<29:05, 3.63s/it] 8%|▊ | 40/520 [03:00<29:03, 3.63s/it] {'loss': 1.3808, 'grad_norm': 0.001602962427827385, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 40/520 [03:00<29:03, 3.63s/it] 8%|▊ | 41/520 [03:04<29:05, 3.64s/it] {'loss': 1.3614, 'grad_norm': 0.0016989725240641649, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 41/520 [03:04<29:05, 3.64s/it] 8%|▊ | 42/520 [03:07<29:12, 3.67s/it] {'loss': 1.3781, 'grad_norm': 0.002132217023897776, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 42/520 [03:07<29:12, 3.67s/it] 8%|▊ | 43/520 [03:11<29:11, 3.67s/it] {'loss': 1.3202, 'grad_norm': 0.0014816022867465703, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 43/520 [03:11<29:11, 3.67s/it] 8%|▊ | 44/520 [03:15<29:14, 3.69s/it] {'loss': 1.4119, 'grad_norm': 0.0015256218982863713, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 8%|▊ | 44/520 [03:15<29:14, 3.69s/it] 9%|▊ | 45/520 [03:19<29:26, 3.72s/it] {'loss': 1.3783, 'grad_norm': 0.0016268991841963789, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▊ | 45/520 [03:19<29:26, 3.72s/it] 9%|▉ | 46/520 [03:22<29:33, 3.74s/it] {'loss': 1.4644, 'grad_norm': 0.001491537691576631, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:22<29:33, 3.74s/it] 9%|▉ | 47/520 [03:26<29:37, 3.76s/it] {'loss': 1.3661, 'grad_norm': 0.0016049440174709343, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:26<29:37, 3.76s/it] 9%|▉ | 48/520 [03:30<29:40, 3.77s/it] {'loss': 1.3462, 'grad_norm': 0.0018864326025468004, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:30<29:40, 3.77s/it] 9%|▉ | 49/520 [03:34<29:34, 3.77s/it] {'loss': 1.3878, 'grad_norm': 0.0015398841453754522, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:34<29:34, 3.77s/it] 10%|▉ | 50/520 [03:37<29:17, 3.74s/it] {'loss': 1.3772, 'grad_norm': 0.0015359608299247009, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 10%|▉ | 50/520 [03:37<29:17, 3.74s/it] 10%|▉ | 51/520 [03:41<29:22, 3.76s/it] {'loss': 1.3124, 'grad_norm': 0.0018560383991418476, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:41<29:22, 3.76s/it] 10%|█ | 52/520 [03:45<29:28, 3.78s/it] {'loss': 1.4356, 'grad_norm': 0.0018542185800010263, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:45<29:28, 3.78s/it] 10%|█ | 53/520 [03:49<29:28, 3.79s/it] {'loss': 1.4214, 'grad_norm': 0.0016510443778822664, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:49<29:28, 3.79s/it] 10%|█ | 54/520 [03:52<29:08, 3.75s/it] {'loss': 1.3459, 'grad_norm': 0.0017106223464816745, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:52<29:08, 3.75s/it] 11%|█ | 55/520 [03:56<28:44, 3.71s/it] {'loss': 1.3189, 'grad_norm': 0.0018709306861592818, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+ 11%|█ | 55/520 [03:56<28:44, 3.71s/it] 11%|█ | 56/520 [04:00<28:25, 3.68s/it] {'loss': 1.442, 'grad_norm': 0.0016551760362059295, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+ 11%|█ | 56/520 [04:00<28:25, 3.68s/it] 11%|█ | 57/520 [04:03<28:10, 3.65s/it] {'loss': 1.3093, 'grad_norm': 0.0021609237512505244, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+ 11%|█ | 57/520 [04:03<28:10, 3.65s/it] 11%|█ | 58/520 [04:07<27:59, 3.64s/it] {'loss': 1.4682, 'grad_norm': 0.0017686954661947536, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+ 11%|█ | 58/520 [04:07<27:59, 3.64s/it] 11%|█▏ | 59/520 [04:10<27:53, 3.63s/it] {'loss': 1.2807, 'grad_norm': 0.0014190438154631118, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [04:10<27:53, 3.63s/it] 12%|█▏ | 60/520 [04:14<28:12, 3.68s/it] {'loss': 1.3735, 'grad_norm': 0.0016432843986947754, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+ 12%|█▏ | 60/520 [04:14<28:12, 3.68s/it] 12%|█▏ | 61/520 [04:18<28:35, 3.74s/it] {'loss': 1.3689, 'grad_norm': 0.001816233287151139, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [04:18<28:35, 3.74s/it] 12%|█▏ | 62/520 [04:22<28:24, 3.72s/it] {'loss': 1.3546, 'grad_norm': 0.0017257078208614313, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:22<28:24, 3.72s/it] 12%|█▏ | 63/520 [04:25<28:09, 3.70s/it] {'loss': 1.3428, 'grad_norm': 0.001488784147583645, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:25<28:09, 3.70s/it] 12%|█▏ | 64/520 [04:29<28:14, 3.72s/it] {'loss': 1.3732, 'grad_norm': 0.0018438315570880202, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:29<28:14, 3.72s/it] 12%|█▎ | 65/520 [04:33<28:14, 3.72s/it] {'loss': 1.3728, 'grad_norm': 0.0019765342352975794, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:33<28:14, 3.72s/it] 13%|█▎ | 66/520 [04:37<28:12, 3.73s/it] {'loss': 1.3266, 'grad_norm': 0.0014052899532052917, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+ 13%|█▎ | 66/520 [04:37<28:12, 3.73s/it] 13%|█▎ | 67/520 [04:40<28:13, 3.74s/it] {'loss': 1.2355, 'grad_norm': 0.001552309914818474, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:40<28:13, 3.74s/it] 13%|█▎ | 68/520 [04:44<28:11, 3.74s/it] {'loss': 1.2969, 'grad_norm': 0.00184970492616638, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:44<28:11, 3.74s/it] 13%|█▎ | 69/520 [04:48<28:17, 3.76s/it] {'loss': 1.283, 'grad_norm': 0.001737016577485442, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:48<28:17, 3.76s/it] 13%|█▎
| 70/520 [04:52<28:13, 3.76s/it] {'loss': 1.3119, 'grad_norm': 0.0017918048314733937, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:52<28:13, 3.76s/it] 14%|█▎ | 71/520 [04:56<28:17, 3.78s/it] {'loss': 1.255, 'grad_norm': 0.0014862110688546623, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:56<28:17, 3.78s/it] 14%|█▍ | 72/520 [04:59<28:18, 3.79s/it] {'loss': 1.4, 'grad_norm': 0.0018157882592017536, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:59<28:18, 3.79s/it] 14%|█▍ | 73/520 [05:03<28:19, 3.80s/it] {'loss': 1.2289, 'grad_norm': 0.001529419021857312, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [05:03<28:19, 3.80s/it] 14%|█▍ | 74/520 [05:07<28:16, 3.80s/it] {'loss': 1.3394, 'grad_norm': 0.0017478819128016634, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [05:07<28:16, 3.80s/it] 14%|█▍ | 75/520 [05:11<28:15, 3.81s/it] {'loss': 1.2487, 'grad_norm': 0.0014592804746108917, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [05:11<28:15, 3.81s/it] 15%|█▍ | 76/520 [05:15<28:13, 3.81s/it] {'loss': 1.4137, 'grad_norm': 0.0014086468178046742, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [05:15<28:13, 3.81s/it] 15%|█▍ | 77/520 [05:19<28:11, 3.82s/it] {'loss': 1.1692, 'grad_norm': 0.0016094476939757254, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:19<28:11, 3.82s/it] 15%|█▌ | 78/520 [05:22<28:10, 3.83s/it] {'loss': 1.2931, 'grad_norm': 0.0016037421654258414, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:22<28:10, 3.83s/it] 15%|█▌ | 79/520 [05:26<28:06, 3.82s/it] {'loss': 1.2806, 'grad_norm': 0.0014596025903962897, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:26<28:06, 3.82s/it] 15%|█▌ | 80/520 [05:30<28:05, 3.83s/it] {'loss': 1.4005, 'grad_norm': 0.0015058985374812846, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:30<28:05, 3.83s/it] 16%|█▌ | 81/520 [05:34<28:11, 3.85s/it] {'loss': 1.418, 'grad_norm': 0.001858855294576831, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:34<28:11, 3.85s/it] 16%|█▌ | 82/520 [05:38<28:03, 3.84s/it] {'loss': 1.346, 'grad_norm': 0.0014348156739349932, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:38<28:03, 3.84s/it] 16%|█▌ | 83/520 [05:42<28:10, 3.87s/it] {'loss': 1.3634, 'grad_norm': 0.0016100706566857263, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:42<28:10, 3.87s/it] 16%|█▌ | 84/520 [05:46<28:03, 3.86s/it] {'loss': 1.3741, 'grad_norm': 0.0015489920071478536, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:46<28:03, 3.86s/it] 16%|█▋ | 85/520 [05:49<28:01, 3.86s/it] {'loss': 1.4131, 'grad_norm': 0.0014938440612967232, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:49<28:01, 3.86s/it] 17%|█▋ | 86/520 [05:53<27:55, 3.86s/it] {'loss': 1.4067, 'grad_norm': 0.001517705760107213, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:53<27:55, 3.86s/it] 17%|█▋ | 87/520 [05:57<27:51, 3.86s/it] {'loss': 1.3471, 'grad_norm': 0.001407443712436037, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:57<27:51, 3.86s/it] 17%|█▋ | 88/520 [06:01<27:52, 3.87s/it] {'loss': 1.2949, 'grad_norm': 0.0013192183294392066, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [06:01<27:52, 3.87s/it] 
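
The DeepSpeed warning at the top of this excerpt suggests calling get_accelerator().empty_cache() inside the training loop so that every rank flushes the PyTorch allocator cache at the same step. A minimal sketch of that pattern, assuming a standard deepspeed.initialize() engine; model_engine, train_dataloader, and FLUSH_EVERY are illustrative names, not taken from this script:

    # Sketch only: synchronize allocator-cache flushes across ranks,
    # as the DeepSpeed warning above recommends.
    from deepspeed.accelerator import get_accelerator

    FLUSH_EVERY = 100  # hypothetical interval; tune to how often the warning fires

    for step, batch in enumerate(train_dataloader):   # train_dataloader: your DataLoader
        loss = model_engine(**batch).loss             # model_engine: deepspeed.initialize()'d engine
        model_engine.backward(loss)
        model_engine.step()
        if step % FLUSH_EVERY == 0:
            get_accelerator().empty_cache()           # every rank hits this at the same step

Flushing on a fixed step interval keeps the empty_cache() calls aligned across ranks, which is the point of the warning; a flush triggered ad hoc on a single rank can leave the others waiting at the next collective.
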
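
The learning_rate column in these records is consistent with linear warmup to the 2e-1 peak (the value in the experiment name) over the first 16 of 520 steps, followed by half-cosine decay over the remaining 504 steps; this is the shape produced by transformers' get_cosine_schedule_with_warmup. A small sketch, under that assumption, that reproduces the logged values:

    import math

    # Hypothetical reconstruction of the schedule implied by the logged learning rates.
    PEAK_LR, WARMUP, TOTAL = 0.2, 16, 520

    def lr_at(step: int) -> float:
        if step <= WARMUP:
            return PEAK_LR * step / WARMUP                      # linear warmup
        progress = (step - WARMUP) / (TOTAL - WARMUP)
        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))  # half-cosine decay

    print(lr_at(13))   # 0.1625             (matches step 13 in the log)
    print(lr_at(17))   # 0.19999805729...   (matches step 17)
    print(lr_at(268))  # 0.1, the midpoint of the decay (matches step 268 later in the log)
    print(lr_at(352))  # 0.05               (matches step 352)
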
17%|█▋ | 89/520 [06:05<27:45, 3.86s/it] {'loss': 1.3663, 'grad_norm': 0.0015980483640555786, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [06:05<27:45, 3.86s/it] 17%|█▋ | 90/520 [06:09<27:44, 3.87s/it] {'loss': 1.2957, 'grad_norm': 0.0014374343472624163, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [06:09<27:44, 3.87s/it] 18%|█▊ | 91/520 [06:13<27:40, 3.87s/it] {'loss': 1.3668, 'grad_norm': 0.0014159802941299162, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [06:13<27:40, 3.87s/it] 18%|█▊ | 92/520 [06:16<27:27, 3.85s/it] {'loss': 1.2969, 'grad_norm': 0.0014818479021955243, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [06:16<27:27, 3.85s/it] 18%|█▊ | 93/520 [06:20<26:58, 3.79s/it] {'loss': 1.3268, 'grad_norm': 0.0017007862740443592, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:20<26:58, 3.79s/it] 18%|█▊ | 94/520 [06:24<26:39, 3.75s/it] {'loss': 1.4029, 'grad_norm': 0.0015353661906376344, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:24<26:39, 3.75s/it] 18%|█▊ | 95/520 [06:28<26:45, 3.78s/it] {'loss': 1.3004, 'grad_norm': 0.0021232444426950146, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:28<26:45, 3.78s/it] 18%|█▊ | 96/520 [06:31<26:58, 3.82s/it] {'loss': 1.3136, 'grad_norm': 0.0012496816679108888, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:31<26:58, 3.82s/it] 19%|█▊ | 97/520 [06:35<27:04, 3.84s/it] {'loss': 1.2804, 'grad_norm': 0.001828027192265314, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:35<27:04, 3.84s/it] 19%|█▉ | 98/520 [06:39<27:04, 3.85s/it] {'loss': 1.2819, 'grad_norm': 0.0013500927262904364, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:39<27:04, 3.85s/it] 19%|█▉ | 99/520 [06:43<26:59, 3.85s/it] {'loss': 1.2959, 'grad_norm': 0.0017063094032188798, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:43<26:59, 3.85s/it] 19%|█▉ | 100/520 [06:47<26:31, 3.79s/it] {'loss': 1.2828, 'grad_norm': 0.001453100114967185, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:47<26:31, 3.79s/it] 19%|█▉ | 101/520 [06:50<26:08, 3.74s/it] {'loss': 1.2956, 'grad_norm': 0.0014021070844998807, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:50<26:08, 3.74s/it] 20%|█▉ | 102/520 [06:54<25:52, 3.71s/it] {'loss': 1.3086, 'grad_norm': 0.0016058839682712978, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:54<25:52, 3.71s/it] 20%|█▉ | 103/520 [06:58<25:40, 3.69s/it] {'loss': 1.232, 'grad_norm': 0.001353721856588024, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:58<25:40, 3.69s/it] 20%|██ | 104/520 [07:01<25:29, 3.68s/it] {'loss': 1.3051, 'grad_norm': 0.0014073693146852838, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [07:01<25:29, 3.68s/it] 20%|██ | 105/520 [07:05<25:26, 3.68s/it] {'loss': 1.2964, 'grad_norm': 0.0014305857973075486, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [07:05<25:26, 3.68s/it] 20%|██ | 106/520 [07:09<25:18, 3.67s/it] {'loss': 1.3071, 'grad_norm': 0.0014422510221787724, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [07:09<25:18, 3.67s/it] 21%|██ | 107/520 [07:12<25:36, 3.72s/it] {'loss': 1.285, 'grad_norm': 0.0014498440927674708, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 
[07:12<25:36, 3.72s/it] 21%|██ | 108/520 [07:16<25:39, 3.74s/it] {'loss': 1.2592, 'grad_norm': 0.0014824988542424455, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [07:16<25:39, 3.74s/it] 21%|██ | 109/520 [07:20<25:29, 3.72s/it] {'loss': 1.261, 'grad_norm': 0.0012464000954284973, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:20<25:29, 3.72s/it] 21%|██ | 110/520 [07:24<25:20, 3.71s/it] {'loss': 1.4403, 'grad_norm': 0.00161318801486354, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:24<25:20, 3.71s/it] 21%|██▏ | 111/520 [07:27<25:08, 3.69s/it] {'loss': 1.439, 'grad_norm': 0.0015701033128610277, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:27<25:08, 3.69s/it] 22%|██▏ | 112/520 [07:31<25:03, 3.68s/it] {'loss': 1.3237, 'grad_norm': 0.0013726781873942802, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:31<25:03, 3.68s/it] 22%|██▏ | 113/520 [07:35<24:58, 3.68s/it] {'loss': 1.2177, 'grad_norm': 0.0014400529619579786, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:35<24:58, 3.68s/it] 22%|██▏ | 114/520 [07:38<24:53, 3.68s/it] {'loss': 1.3054, 'grad_norm': 0.001283913777439465, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:38<24:53, 3.68s/it] 22%|██▏ | 115/520 [07:42<24:53, 3.69s/it] {'loss': 1.4103, 'grad_norm': 0.0013241186873069533, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:42<24:53, 3.69s/it] 22%|██▏ | 116/520 [07:46<24:51, 3.69s/it] {'loss': 1.4228, 'grad_norm': 0.001435032685734865, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:46<24:51, 3.69s/it] 22%|██▎ | 117/520 [07:49<25:01, 3.73s/it] {'loss': 1.3891, 'grad_norm': 0.0014254398560337273, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:50<25:01, 3.73s/it] 23%|██▎ | 118/520 [07:53<24:55, 3.72s/it] {'loss': 1.2946, 'grad_norm': 0.0012791894155927716, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:53<24:55, 3.72s/it] 23%|██▎ | 119/520 [07:57<24:47, 3.71s/it] {'loss': 1.2511, 'grad_norm': 0.001564896853025713, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:57<24:47, 3.71s/it] 23%|██▎ | 120/520 [08:01<24:49, 3.72s/it] {'loss': 1.2687, 'grad_norm': 0.0019594443301083867, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [08:01<24:49, 3.72s/it] 23%|██▎ | 121/520 [08:04<24:39, 3.71s/it] {'loss': 1.3239, 'grad_norm': 0.0015452398456985027, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [08:04<24:39, 3.71s/it] 23%|██▎ | 122/520 [08:08<24:35, 3.71s/it] {'loss': 1.2271, 'grad_norm': 0.0013209417646865426, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [08:08<24:35, 3.71s/it] 24%|██▎ | 123/520 [08:12<24:33, 3.71s/it] {'loss': 1.352, 'grad_norm': 0.0013915543249720214, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [08:12<24:33, 3.71s/it] 24%|██▍ | 124/520 [08:15<24:27, 3.71s/it] {'loss': 1.2902, 'grad_norm': 0.0014202533275848473, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [08:15<24:27, 3.71s/it] 24%|██▍ | 125/520 [08:19<24:23, 3.70s/it] {'loss': 1.2858, 'grad_norm': 0.0014873084613700402, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:19<24:23, 3.70s/it] 24%|██▍ | 126/520 [08:24<25:36, 3.90s/it] {'loss': 1.2779, 'grad_norm': 0.0012482808813141727, 
'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:24<25:36, 3.90s/it] 24%|██▍ | 127/520 [08:27<25:05, 3.83s/it] {'loss': 1.2679, 'grad_norm': 0.0016618958436618258, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:27<25:05, 3.83s/it] 25%|██▍ | 128/520 [08:31<24:45, 3.79s/it] {'loss': 1.3176, 'grad_norm': 0.001633472585446546, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:31<24:45, 3.79s/it] 25%|██▍ | 129/520 [08:35<24:25, 3.75s/it] {'loss': 1.2684, 'grad_norm': 0.0013342062286773486, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:35<24:25, 3.75s/it] 25%|██▌ | 130/520 [08:38<24:11, 3.72s/it] {'loss': 1.303, 'grad_norm': 0.00120760009086674, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:38<24:11, 3.72s/it] 25%|██▌ | 131/520 [08:42<23:59, 3.70s/it] {'loss': 1.248, 'grad_norm': 0.0012061846359147168, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:42<23:59, 3.70s/it] 25%|██▌ | 132/520 [08:45<23:49, 3.68s/it] {'loss': 1.3545, 'grad_norm': 0.0014264332548528712, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:45<23:49, 3.68s/it] 26%|██▌ | 133/520 [08:49<23:45, 3.68s/it] {'loss': 1.265, 'grad_norm': 0.0015188224710064114, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:49<23:45, 3.68s/it] 26%|██▌ | 134/520 [08:53<23:40, 3.68s/it] {'loss': 1.3505, 'grad_norm': 0.0013842010407447275, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:53<23:40, 3.68s/it] 26%|██▌ | 135/520 [08:56<23:36, 3.68s/it] {'loss': 1.3958, 'grad_norm': 0.0013412520324347374, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:57<23:36, 3.68s/it] 26%|██▌ | 136/520 [09:00<23:32, 3.68s/it] {'loss': 1.3375, 'grad_norm': 0.0013168154221347252, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [09:00<23:32, 3.68s/it] 26%|██▋ | 137/520 [09:04<23:32, 3.69s/it] {'loss': 1.2521, 'grad_norm': 0.0016624436809059328, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [09:04<23:32, 3.69s/it] 27%|██▋ | 138/520 [09:08<23:28, 3.69s/it] {'loss': 1.2672, 'grad_norm': 0.0012692498904692362, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [09:08<23:28, 3.69s/it] 27%|██▋ | 139/520 [09:11<23:25, 3.69s/it] {'loss': 1.1595, 'grad_norm': 0.001283746829766926, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [09:11<23:25, 3.69s/it] 27%|██▋ | 140/520 [09:15<23:21, 3.69s/it] {'loss': 1.2965, 'grad_norm': 0.0012393343114479002, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:15<23:21, 3.69s/it] 27%|██▋ | 141/520 [09:19<23:14, 3.68s/it] {'loss': 1.377, 'grad_norm': 0.001312302767393209, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:19<23:14, 3.68s/it] 27%|██▋ | 142/520 [09:22<23:10, 3.68s/it] {'loss': 1.3074, 'grad_norm': 0.0011982110602456733, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:22<23:10, 3.68s/it] 28%|██▊ | 143/520 [09:26<23:07, 3.68s/it] {'loss': 1.2958, 'grad_norm': 0.001583377616721206, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:26<23:07, 3.68s/it] 28%|██▊ | 144/520 [09:30<23:02, 3.68s/it] {'loss': 1.2605, 'grad_norm': 0.0013735917986003176, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:30<23:02, 3.68s/it] 28%|██▊ | 145/520 
[09:33<23:03, 3.69s/it] {'loss': 1.2009, 'grad_norm': 0.001278665813280159, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:33<23:03, 3.69s/it] 28%|██▊ | 146/520 [09:37<22:58, 3.69s/it] {'loss': 1.3742, 'grad_norm': 0.0013126569411955032, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:37<22:58, 3.69s/it] 28%|██▊ | 147/520 [09:41<23:08, 3.72s/it] {'loss': 1.2405, 'grad_norm': 0.0014158899946133897, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:41<23:08, 3.72s/it] 28%|██▊ | 148/520 [09:45<23:16, 3.75s/it] {'loss': 1.271, 'grad_norm': 0.0013692006185035, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:45<23:16, 3.75s/it] 29%|██▊ | 149/520 [09:49<23:26, 3.79s/it] {'loss': 1.2125, 'grad_norm': 0.001309464515051762, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:49<23:26, 3.79s/it] 29%|██▉ | 150/520 [09:52<23:30, 3.81s/it] {'loss': 1.4408, 'grad_norm': 0.00137073137208805, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:52<23:30, 3.81s/it] 29%|██▉ | 151/520 [09:56<23:32, 3.83s/it] {'loss': 1.258, 'grad_norm': 0.0012889908575197007, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:56<23:32, 3.83s/it] 29%|██▉ | 152/520 [10:00<23:31, 3.83s/it] {'loss': 1.2348, 'grad_norm': 0.0013673047691029373, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [10:00<23:31, 3.83s/it] 29%|██▉ | 153/520 [10:04<23:31, 3.85s/it] {'loss': 1.2631, 'grad_norm': 0.0012943292330016222, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [10:04<23:31, 3.85s/it] 30%|██▉ | 154/520 [10:08<23:27, 3.85s/it] {'loss': 1.3497, 'grad_norm': 0.001336795816190581, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [10:08<23:27, 3.85s/it] 30%|██▉ | 155/520 [10:12<23:31, 3.87s/it] {'loss': 1.2625, 'grad_norm': 0.0013208055178653376, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [10:12<23:31, 3.87s/it] 30%|███ | 156/520 [10:16<23:26, 3.86s/it] {'loss': 1.2865, 'grad_norm': 0.0015492812343712115, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [10:16<23:26, 3.86s/it] 30%|███ | 157/520 [10:19<23:24, 3.87s/it] {'loss': 1.365, 'grad_norm': 0.0013537153773068298, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [10:19<23:24, 3.87s/it] 30%|███ | 158/520 [10:23<23:32, 3.90s/it] {'loss': 1.2668, 'grad_norm': 0.0014887238852697084, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:23<23:32, 3.90s/it] 31%|███ | 159/520 [10:27<23:22, 3.89s/it] {'loss': 1.3079, 'grad_norm': 0.0012917659616224274, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:27<23:22, 3.89s/it] 31%|███ | 160/520 [10:31<23:17, 3.88s/it] {'loss': 1.3166, 'grad_norm': 0.0013622935117715781, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:31<23:17, 3.88s/it] 31%|███ | 161/520 [10:35<23:01, 3.85s/it] {'loss': 1.2968, 'grad_norm': 0.0013536942259136677, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:35<23:01, 3.85s/it] 31%|███ | 162/520 [10:39<22:39, 3.80s/it] {'loss': 1.2932, 'grad_norm': 0.0012902547025164077, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:39<22:39, 3.80s/it] 31%|███▏ | 163/520 [10:42<22:23, 3.76s/it] {'loss': 1.1875, 'grad_norm': 0.001727938076928846, 'learning_rate': 0.16087614290087207, 
'epoch': 0.31} + 31%|███▏ | 163/520 [10:42<22:23, 3.76s/it] 32%|███▏ | 164/520 [10:46<22:09, 3.74s/it] {'loss': 1.1578, 'grad_norm': 0.0013640506443038305, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:46<22:09, 3.74s/it] 32%|███▏ | 165/520 [10:50<22:03, 3.73s/it] {'loss': 1.2944, 'grad_norm': 0.0012511609246064741, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:50<22:03, 3.73s/it] 32%|███▏ | 166/520 [10:53<21:49, 3.70s/it] {'loss': 1.2801, 'grad_norm': 0.0014528428953176017, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:53<21:49, 3.70s/it] 32%|███▏ | 167/520 [10:57<21:42, 3.69s/it] {'loss': 1.2756, 'grad_norm': 0.001401454217021591, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:57<21:42, 3.69s/it] 32%|███▏ | 168/520 [11:01<21:41, 3.70s/it] {'loss': 1.2148, 'grad_norm': 0.0013333716013333977, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [11:01<21:41, 3.70s/it] 32%|███▎ | 169/520 [11:04<21:34, 3.69s/it] {'loss': 1.2922, 'grad_norm': 0.0013601221758443218, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [11:04<21:34, 3.69s/it] 33%|███▎ | 170/520 [11:08<21:26, 3.68s/it] {'loss': 1.2425, 'grad_norm': 0.0011579857976474646, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [11:08<21:26, 3.68s/it] 33%|███▎ | 171/520 [11:12<21:26, 3.69s/it] {'loss': 1.2209, 'grad_norm': 0.0013859609677863193, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [11:12<21:26, 3.69s/it] 33%|███▎ | 172/520 [11:15<21:15, 3.66s/it] {'loss': 1.2965, 'grad_norm': 0.0013573970117397463, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:15<21:15, 3.66s/it] 33%|███▎ | 173/520 [11:19<21:11, 3.66s/it] {'loss': 1.2408, 'grad_norm': 0.0012783729967962107, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:19<21:11, 3.66s/it] 33%|███▎ | 174/520 [11:23<21:07, 3.66s/it] {'loss': 1.2853, 'grad_norm': 0.0013266740232981541, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:23<21:07, 3.66s/it] 34%|███▎ | 175/520 [11:26<21:05, 3.67s/it] {'loss': 1.1996, 'grad_norm': 0.0012078472971007758, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:26<21:05, 3.67s/it] 34%|███▍ | 176/520 [11:30<20:59, 3.66s/it] {'loss': 1.3293, 'grad_norm': 0.0012849058455319418, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:30<20:59, 3.66s/it] 34%|███▍ | 177/520 [11:34<20:54, 3.66s/it] {'loss': 1.1975, 'grad_norm': 0.0014178134139691766, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:34<20:54, 3.66s/it] 34%|███▍ | 178/520 [11:37<20:48, 3.65s/it] {'loss': 1.2665, 'grad_norm': 0.0013760893995459426, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:37<20:48, 3.65s/it] 34%|███▍ | 179/520 [11:41<20:49, 3.66s/it] {'loss': 1.3496, 'grad_norm': 0.001282713433410539, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:41<20:49, 3.66s/it] 35%|███▍ | 180/520 [11:45<21:06, 3.72s/it] {'loss': 1.2714, 'grad_norm': 0.0014346957255660555, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:45<21:06, 3.72s/it] 35%|███▍ | 181/520 [11:49<21:14, 3.76s/it] {'loss': 1.2374, 'grad_norm': 0.0011749907781257534, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:49<21:14, 3.76s/it] 35%|███▌ | 
182/520 [11:53<21:17, 3.78s/it] {'loss': 1.251, 'grad_norm': 0.0012986894928306307, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:53<21:17, 3.78s/it] 35%|███▌ | 183/520 [11:56<21:20, 3.80s/it] {'loss': 1.2778, 'grad_norm': 0.0012725857624534033, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:56<21:20, 3.80s/it] 35%|███▌ | 184/520 [12:00<21:18, 3.81s/it] {'loss': 1.209, 'grad_norm': 0.0013449013701513825, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [12:00<21:18, 3.81s/it] 36%|███▌ | 185/520 [12:04<21:36, 3.87s/it] {'loss': 1.3541, 'grad_norm': 0.0012848506249633143, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [12:04<21:36, 3.87s/it] 36%|███▌ | 186/520 [12:08<21:26, 3.85s/it] {'loss': 1.232, 'grad_norm': 0.001318200942008746, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [12:08<21:26, 3.85s/it] 36%|███▌ | 187/520 [12:12<21:21, 3.85s/it] {'loss': 1.2311, 'grad_norm': 0.001470188983809285, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:12<21:21, 3.85s/it] 36%|███▌ | 188/520 [12:16<21:10, 3.83s/it] {'loss': 1.3122, 'grad_norm': 0.0013688623421607103, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:16<21:10, 3.83s/it] 36%|███▋ | 189/520 [12:19<21:07, 3.83s/it] {'loss': 1.3219, 'grad_norm': 0.0011703619526866476, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:19<21:07, 3.83s/it] 37%|███▋ | 190/520 [12:23<21:00, 3.82s/it] {'loss': 1.2454, 'grad_norm': 0.0013754279541607153, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:23<21:00, 3.82s/it] 37%|███▋ | 191/520 [12:27<21:00, 3.83s/it] {'loss': 1.1976, 'grad_norm': 0.001170130253843409, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:27<21:00, 3.83s/it] 37%|███▋ | 192/520 [12:31<21:01, 3.85s/it] {'loss': 1.2796, 'grad_norm': 0.0012748817869082532, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:31<21:01, 3.85s/it] 37%|███▋ | 193/520 [12:35<20:59, 3.85s/it] {'loss': 1.2442, 'grad_norm': 0.001563506036552951, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:35<20:59, 3.85s/it] 37%|███▋ | 194/520 [12:39<20:55, 3.85s/it] {'loss': 1.1409, 'grad_norm': 0.0014770534208083481, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:39<20:55, 3.85s/it] 38%|███▊ | 195/520 [12:43<20:50, 3.85s/it] {'loss': 1.3007, 'grad_norm': 0.0013091368423535298, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:43<20:50, 3.85s/it] 38%|███▊ | 196/520 [12:46<20:44, 3.84s/it] {'loss': 1.2706, 'grad_norm': 0.0014512760761639472, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:46<20:44, 3.84s/it] 38%|███▊ | 197/520 [12:50<20:39, 3.84s/it] {'loss': 1.222, 'grad_norm': 0.0012347092428662799, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:50<20:39, 3.84s/it] 38%|███▊ | 198/520 [12:54<20:33, 3.83s/it] {'loss': 1.2943, 'grad_norm': 0.0014503377313499928, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:54<20:33, 3.83s/it] 38%|███▊ | 199/520 [12:58<20:29, 3.83s/it] {'loss': 1.2124, 'grad_norm': 0.00130215728554093, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:58<20:29, 3.83s/it] 38%|███▊ | 200/520 [13:02<20:25, 3.83s/it] {'loss': 1.1897, 'grad_norm': 
0.0012864432139381434, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [13:02<20:25, 3.83s/it] 39%|███▊ | 201/520 [13:06<20:22, 3.83s/it] {'loss': 1.2184, 'grad_norm': 0.0011406668850848817, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [13:06<20:22, 3.83s/it] 39%|███▉ | 202/520 [13:09<20:17, 3.83s/it] {'loss': 1.2094, 'grad_norm': 0.0012326564747281834, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:09<20:17, 3.83s/it] 39%|███▉ | 203/520 [13:13<20:15, 3.83s/it] {'loss': 1.2692, 'grad_norm': 0.001350025219845292, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:13<20:15, 3.83s/it] 39%|███▉ | 204/520 [13:17<20:09, 3.83s/it] {'loss': 1.2788, 'grad_norm': 0.0013066658495833364, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:17<20:09, 3.83s/it] 39%|███▉ | 205/520 [13:21<20:06, 3.83s/it] {'loss': 1.2181, 'grad_norm': 0.00126760275680858, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:21<20:06, 3.83s/it] 40%|███▉ | 206/520 [13:25<20:04, 3.84s/it] {'loss': 1.3177, 'grad_norm': 0.001288027025929742, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:25<20:04, 3.84s/it] 40%|███▉ | 207/520 [13:28<19:57, 3.83s/it] {'loss': 1.1966, 'grad_norm': 0.0012659254277028733, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:28<19:57, 3.83s/it] 40%|████ | 208/520 [13:32<19:54, 3.83s/it] {'loss': 1.308, 'grad_norm': 0.001395135242105675, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:32<19:54, 3.83s/it] 40%|████ | 209/520 [13:36<19:48, 3.82s/it] {'loss': 1.2178, 'grad_norm': 0.0012267138058650571, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:36<19:48, 3.82s/it] 40%|████ | 210/520 [13:40<19:51, 3.84s/it] {'loss': 1.2941, 'grad_norm': 0.0013519969649780673, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:40<19:51, 3.84s/it] 41%|████ | 211/520 [13:44<19:47, 3.84s/it] {'loss': 1.2903, 'grad_norm': 0.0012458081457658618, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:44<19:47, 3.84s/it] 41%|████ | 212/520 [13:48<19:42, 3.84s/it] {'loss': 1.2812, 'grad_norm': 0.0012840260772401509, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:48<19:42, 3.84s/it] 41%|████ | 213/520 [13:52<19:44, 3.86s/it] {'loss': 1.2414, 'grad_norm': 0.0015508444791409765, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:52<19:44, 3.86s/it] 41%|████ | 214/520 [13:55<19:38, 3.85s/it] {'loss': 1.227, 'grad_norm': 0.0013237847175773367, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:55<19:38, 3.85s/it] 41%|████▏ | 215/520 [13:59<19:34, 3.85s/it] {'loss': 1.1516, 'grad_norm': 0.0012127806308351759, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:59<19:34, 3.85s/it] 42%|████▏ | 216/520 [14:03<19:32, 3.86s/it] {'loss': 1.1487, 'grad_norm': 0.001205240205840489, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [14:03<19:32, 3.86s/it] 42%|████▏ | 217/520 [14:07<19:28, 3.86s/it] {'loss': 1.2721, 'grad_norm': 0.0013000819386108755, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [14:07<19:28, 3.86s/it] 42%|████▏ | 218/520 [14:11<19:23, 3.85s/it] {'loss': 1.2603, 'grad_norm': 0.0013541306367358587, 'learning_rate': 0.13066447409333345, 'epoch': 
0.42} + 42%|████▏ | 218/520 [14:11<19:23, 3.85s/it] 42%|████▏ | 219/520 [14:15<19:16, 3.84s/it] {'loss': 1.2584, 'grad_norm': 0.001180378346830474, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:15<19:16, 3.84s/it] 42%|████▏ | 220/520 [14:19<19:11, 3.84s/it] {'loss': 1.1966, 'grad_norm': 0.0012309540924713013, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:19<19:11, 3.84s/it] 42%|████▎ | 221/520 [14:22<19:06, 3.83s/it] {'loss': 1.2643, 'grad_norm': 0.0013367118430250115, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:22<19:06, 3.83s/it] 43%|████▎ | 222/520 [14:26<19:02, 3.83s/it] {'loss': 1.1993, 'grad_norm': 0.0013769584181254498, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:26<19:02, 3.83s/it] 43%|████▎ | 223/520 [14:30<18:59, 3.84s/it] {'loss': 1.1952, 'grad_norm': 0.0012064404819558587, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:30<18:59, 3.84s/it] 43%|████▎ | 224/520 [14:34<18:58, 3.85s/it] {'loss': 1.2715, 'grad_norm': 0.0013172682876456106, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:34<18:58, 3.85s/it] 43%|████▎ | 225/520 [14:38<18:52, 3.84s/it] {'loss': 1.2012, 'grad_norm': 0.0012986019078067594, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:38<18:52, 3.84s/it] 43%|████▎ | 226/520 [14:42<18:48, 3.84s/it] {'loss': 1.3026, 'grad_norm': 0.0012165159228699721, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:42<18:48, 3.84s/it] 44%|████▎ | 227/520 [14:45<18:43, 3.83s/it] {'loss': 1.2837, 'grad_norm': 0.001182292747882909, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:45<18:43, 3.83s/it] 44%|████▍ | 228/520 [14:49<18:38, 3.83s/it] {'loss': 1.3078, 'grad_norm': 0.0012480461260661498, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:49<18:38, 3.83s/it] 44%|████▍ | 229/520 [14:53<18:35, 3.83s/it] {'loss': 1.2574, 'grad_norm': 0.0011736878887001624, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:53<18:35, 3.83s/it] 44%|████▍ | 230/520 [14:57<18:31, 3.83s/it] {'loss': 1.1484, 'grad_norm': 0.0012275065399223303, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:57<18:31, 3.83s/it] 44%|████▍ | 231/520 [15:01<18:27, 3.83s/it] {'loss': 1.2098, 'grad_norm': 0.0011773625824034052, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [15:01<18:27, 3.83s/it] 45%|████▍ | 232/520 [15:05<18:23, 3.83s/it] {'loss': 1.3397, 'grad_norm': 0.0014509497009184893, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [15:05<18:23, 3.83s/it] 45%|████▍ | 233/520 [15:08<18:20, 3.83s/it] {'loss': 1.237, 'grad_norm': 0.001464204300504781, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [15:08<18:20, 3.83s/it] 45%|████▌ | 234/520 [15:12<18:18, 3.84s/it] {'loss': 1.1651, 'grad_norm': 0.0014668516790054566, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [15:12<18:18, 3.84s/it] 45%|████▌ | 235/520 [15:16<18:14, 3.84s/it] {'loss': 1.219, 'grad_norm': 0.0013331095625527878, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:16<18:14, 3.84s/it] 45%|████▌ | 236/520 [15:20<18:11, 3.84s/it] {'loss': 1.2937, 'grad_norm': 0.0011696861288168334, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 
[15:20<18:11, 3.84s/it] 46%|████▌ | 237/520 [15:24<18:07, 3.84s/it] {'loss': 1.2913, 'grad_norm': 0.0012331845637226435, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:24<18:07, 3.84s/it] 46%|████▌ | 238/520 [15:28<18:03, 3.84s/it] {'loss': 1.2241, 'grad_norm': 0.0012914954455704774, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:28<18:03, 3.84s/it] 46%|████▌ | 239/520 [15:31<18:00, 3.85s/it] {'loss': 1.3009, 'grad_norm': 0.0013108667014731232, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:31<18:00, 3.85s/it] 46%|████▌ | 240/520 [15:35<17:57, 3.85s/it] {'loss': 1.106, 'grad_norm': 0.0012998079373994944, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:35<17:57, 3.85s/it] 46%|████▋ | 241/520 [15:39<17:54, 3.85s/it] {'loss': 1.1945, 'grad_norm': 0.0012189118341566654, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:39<17:54, 3.85s/it] 47%|████▋ | 242/520 [15:43<17:50, 3.85s/it] {'loss': 1.2083, 'grad_norm': 0.001183360726614087, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:43<17:50, 3.85s/it] 47%|████▋ | 243/520 [15:47<17:45, 3.85s/it] {'loss': 1.2035, 'grad_norm': 0.0012643188328280962, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:47<17:45, 3.85s/it] 47%|████▋ | 244/520 [15:51<17:40, 3.84s/it] {'loss': 1.3139, 'grad_norm': 0.0012824804199400308, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:51<17:40, 3.84s/it] 47%|████▋ | 245/520 [15:54<17:36, 3.84s/it] {'loss': 1.1811, 'grad_norm': 0.001391781738220609, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:54<17:36, 3.84s/it] 47%|████▋ | 246/520 [15:58<17:32, 3.84s/it] {'loss': 1.3262, 'grad_norm': 0.0012733240332116248, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:58<17:32, 3.84s/it] 48%|████▊ | 247/520 [16:02<17:29, 3.84s/it] {'loss': 1.3578, 'grad_norm': 0.0012739762673289494, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [16:02<17:29, 3.84s/it] 48%|████▊ | 248/520 [16:06<17:24, 3.84s/it] {'loss': 1.1865, 'grad_norm': 0.0012911331997083964, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [16:06<17:24, 3.84s/it] 48%|████▊ | 249/520 [16:10<17:22, 3.85s/it] {'loss': 1.2774, 'grad_norm': 0.0012276493678675961, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [16:10<17:22, 3.85s/it] 48%|████▊ | 250/520 [16:14<17:04, 3.79s/it] {'loss': 1.2151, 'grad_norm': 0.0013544980040702981, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:14<17:04, 3.79s/it] 48%|████▊ | 251/520 [16:17<16:49, 3.75s/it] {'loss': 1.2809, 'grad_norm': 0.001253415548314361, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:17<16:49, 3.75s/it] 48%|████▊ | 252/520 [16:21<16:37, 3.72s/it] {'loss': 1.2306, 'grad_norm': 0.0011926860309748489, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:21<16:37, 3.72s/it] 49%|████▊ | 253/520 [16:25<16:29, 3.71s/it] {'loss': 1.275, 'grad_norm': 0.0014669976465416525, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:25<16:29, 3.71s/it] 49%|████▉ | 254/520 [16:28<16:22, 3.69s/it] {'loss': 1.2141, 'grad_norm': 0.0011769045202359354, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:28<16:22, 3.69s/it] 49%|████▉ 
| 255/520 [16:32<16:15, 3.68s/it] {'loss': 1.2172, 'grad_norm': 0.0014198167333263688, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:32<16:15, 3.68s/it] 49%|████▉ | 256/520 [16:35<16:08, 3.67s/it] {'loss': 1.2671, 'grad_norm': 0.0013292650475838915, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:35<16:08, 3.67s/it] 49%|████▉ | 257/520 [16:39<16:01, 3.66s/it] {'loss': 1.2478, 'grad_norm': 0.001307188407854005, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:39<16:01, 3.66s/it] 50%|████▉ | 258/520 [16:43<15:59, 3.66s/it] {'loss': 1.2555, 'grad_norm': 0.0011311487758287758, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:43<15:59, 3.66s/it] 50%|████▉ | 259/520 [16:46<15:52, 3.65s/it] {'loss': 1.333, 'grad_norm': 0.0014543071467105115, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:46<15:52, 3.65s/it] 50%|█████ | 260/520 [16:50<15:49, 3.65s/it] {'loss': 1.2718, 'grad_norm': 0.0013449116461511784, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:50<15:49, 3.65s/it] 50%|█████ | 261/520 [16:54<15:45, 3.65s/it] {'loss': 1.2173, 'grad_norm': 0.0013107757612662208, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:54<15:45, 3.65s/it] 50%|█████ | 262/520 [16:57<15:42, 3.65s/it] {'loss': 1.1858, 'grad_norm': 0.0012699669364174522, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:57<15:42, 3.65s/it] 51%|█████ | 263/520 [17:01<15:37, 3.65s/it] {'loss': 1.2384, 'grad_norm': 0.0012341165586870206, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [17:01<15:37, 3.65s/it] 51%|█████ | 264/520 [17:05<15:34, 3.65s/it] {'loss': 1.2829, 'grad_norm': 0.0012554608018127448, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [17:05<15:34, 3.65s/it] 51%|█████ | 265/520 [17:08<15:30, 3.65s/it] {'loss': 1.1951, 'grad_norm': 0.001384877530911316, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [17:08<15:30, 3.65s/it] 51%|█████ | 266/520 [17:12<15:29, 3.66s/it] {'loss': 1.0676, 'grad_norm': 0.0012313342244768865, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [17:12<15:29, 3.66s/it] 51%|█████▏ | 267/520 [17:16<15:25, 3.66s/it] {'loss': 1.1966, 'grad_norm': 0.0013627465592265515, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:16<15:25, 3.66s/it] 52%|█████▏ | 268/520 [17:19<15:21, 3.66s/it] {'loss': 1.3485, 'grad_norm': 0.0017921422221820233, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:19<15:21, 3.66s/it] 52%|█████▏ | 269/520 [17:23<15:16, 3.65s/it] {'loss': 1.2974, 'grad_norm': 0.001331431093816563, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:23<15:16, 3.65s/it] 52%|█████▏ | 270/520 [17:27<15:13, 3.66s/it] {'loss': 1.1647, 'grad_norm': 0.0011519969861010825, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:27<15:13, 3.66s/it] 52%|█████▏ | 271/520 [17:30<15:10, 3.66s/it] {'loss': 1.2842, 'grad_norm': 0.0013442517607652653, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:30<15:10, 3.66s/it] 52%|█████▏ | 272/520 [17:34<15:07, 3.66s/it] {'loss': 1.1828, 'grad_norm': 0.0013307927603700962, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:34<15:07, 3.66s/it] 52%|█████▎ | 273/520 [17:38<15:03, 3.66s/it] 
{'loss': 1.299, 'grad_norm': 0.0012785286128753363, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:38<15:03, 3.66s/it] 53%|█████▎ | 274/520 [17:41<14:59, 3.66s/it] {'loss': 1.2606, 'grad_norm': 0.0015170388570930721, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:41<14:59, 3.66s/it] 53%|█████▎ | 275/520 [17:45<14:55, 3.65s/it] {'loss': 1.2055, 'grad_norm': 0.001472516536373414, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:45<14:55, 3.65s/it] 53%|█████▎ | 276/520 [17:49<14:51, 3.65s/it] {'loss': 1.2711, 'grad_norm': 0.0015450017794852793, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:49<14:51, 3.65s/it] 53%|█████▎ | 277/520 [17:52<14:48, 3.66s/it] {'loss': 1.3016, 'grad_norm': 0.001188956028832727, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:52<14:48, 3.66s/it] 53%|█████▎ | 278/520 [17:56<14:45, 3.66s/it] {'loss': 1.1546, 'grad_norm': 0.0011170841532929473, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:56<14:45, 3.66s/it] 54%|█████▎ | 279/520 [18:00<14:42, 3.66s/it] {'loss': 1.1865, 'grad_norm': 0.001367801921019826, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [18:00<14:42, 3.66s/it] 54%|█████▍ | 280/520 [18:03<14:44, 3.69s/it] {'loss': 1.2026, 'grad_norm': 0.0015615590684538013, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [18:03<14:44, 3.69s/it] 54%|█████▍ | 281/520 [18:07<14:39, 3.68s/it] {'loss': 1.3009, 'grad_norm': 0.0013462935984653388, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [18:07<14:39, 3.68s/it] 54%|█████▍ | 282/520 [18:11<14:34, 3.68s/it] {'loss': 1.1667, 'grad_norm': 0.001164352114941471, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:11<14:34, 3.68s/it] 54%|█████▍ | 283/520 [18:14<14:31, 3.68s/it] {'loss': 1.3121, 'grad_norm': 0.0013477424291378245, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:14<14:31, 3.68s/it] 55%|█████▍ | 284/520 [18:18<14:25, 3.67s/it] {'loss': 1.1842, 'grad_norm': 0.0013287990606710364, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:18<14:25, 3.67s/it] 55%|█████▍ | 285/520 [18:22<14:28, 3.70s/it] {'loss': 1.1923, 'grad_norm': 0.001262370010044865, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:22<14:28, 3.70s/it] 55%|█████▌ | 286/520 [18:26<14:39, 3.76s/it] {'loss': 1.0753, 'grad_norm': 0.001451682811910492, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:26<14:39, 3.76s/it] 55%|█████▌ | 287/520 [18:29<14:41, 3.78s/it] {'loss': 1.3002, 'grad_norm': 0.0012753537870727166, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:29<14:41, 3.78s/it] 55%|█████▌ | 288/520 [18:33<14:43, 3.81s/it] {'loss': 1.3359, 'grad_norm': 0.0012498896582450994, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:33<14:43, 3.81s/it] 56%|█████▌ | 289/520 [18:37<14:42, 3.82s/it] {'loss': 1.2066, 'grad_norm': 0.0011944485416286435, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:37<14:42, 3.82s/it] 56%|█████▌ | 290/520 [18:41<14:41, 3.83s/it] {'loss': 1.131, 'grad_norm': 0.001193284541365211, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:41<14:41, 3.83s/it] 56%|█████▌ | 291/520 [18:45<14:38, 3.84s/it] 
{'loss': 1.1774, 'grad_norm': 0.0012832574343486108, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:45<14:38, 3.84s/it] 56%|█████▌ | 292/520 [18:49<14:35, 3.84s/it] {'loss': 1.2342, 'grad_norm': 0.0012845082596791763, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:49<14:35, 3.84s/it] 56%|█████▋ | 293/520 [18:53<14:32, 3.84s/it] {'loss': 1.1786, 'grad_norm': 0.0013284597350662045, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:53<14:32, 3.84s/it] 57%|█████▋ | 294/520 [18:56<14:28, 3.85s/it] {'loss': 1.1987, 'grad_norm': 0.0014071895885836877, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:56<14:28, 3.85s/it] 57%|█████▋ | 295/520 [19:00<14:24, 3.84s/it] {'loss': 1.2241, 'grad_norm': 0.001280439015560157, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [19:00<14:24, 3.84s/it] 57%|█████▋ | 296/520 [19:04<14:21, 3.85s/it] {'loss': 1.1488, 'grad_norm': 0.0014027675806558431, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [19:04<14:21, 3.85s/it] 57%|█████▋ | 297/520 [19:08<14:17, 3.84s/it] {'loss': 1.2778, 'grad_norm': 0.0014001747078641726, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [19:08<14:17, 3.84s/it] 57%|█████▋ | 298/520 [19:12<13:59, 3.78s/it] {'loss': 1.2421, 'grad_norm': 0.0011890953943929803, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:12<13:59, 3.78s/it] 57%|█████▊ | 299/520 [19:15<13:45, 3.73s/it] {'loss': 1.2568, 'grad_norm': 0.0011732134360226013, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:15<13:45, 3.73s/it] 58%|█████▊ | 300/520 [19:19<13:34, 3.70s/it] {'loss': 1.2882, 'grad_norm': 0.0012282529780203698, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:19<13:34, 3.70s/it] 58%|█████▊ | 301/520 [19:22<13:25, 3.68s/it] {'loss': 1.2661, 'grad_norm': 0.0012316192310939784, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:22<13:25, 3.68s/it] 58%|█████▊ | 302/520 [19:26<13:18, 3.66s/it] {'loss': 1.2701, 'grad_norm': 0.001297756675981555, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:26<13:18, 3.66s/it] 58%|█████▊ | 303/520 [19:30<13:13, 3.66s/it] {'loss': 1.1989, 'grad_norm': 0.001416793438871278, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:30<13:13, 3.66s/it] 58%|█████▊ | 304/520 [19:34<13:36, 3.78s/it] {'loss': 1.1738, 'grad_norm': 0.0014402398078061257, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:34<13:36, 3.78s/it] 59%|█████▊ | 305/520 [19:37<13:23, 3.74s/it] {'loss': 1.3004, 'grad_norm': 0.0013811142990107236, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:37<13:23, 3.74s/it] 59%|█████▉ | 306/520 [19:41<13:19, 3.74s/it] {'loss': 1.2454, 'grad_norm': 0.0012757651873761288, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:41<13:19, 3.74s/it] 59%|█████▉ | 307/520 [19:45<13:08, 3.70s/it] {'loss': 1.1808, 'grad_norm': 0.001151097743020347, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:45<13:08, 3.70s/it] 59%|█████▉ | 308/520 [19:48<13:00, 3.68s/it] {'loss': 1.2996, 'grad_norm': 0.001361064407650148, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:48<13:00, 3.68s/it] 59%|█████▉ | 309/520 [19:52<12:54, 
3.67s/it] {'loss': 1.1877, 'grad_norm': 0.0011781343428303862, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:52<12:54, 3.67s/it] 60%|█████▉ | 310/520 [19:56<12:47, 3.65s/it] {'loss': 1.1685, 'grad_norm': 0.0012666918030471534, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:56<12:47, 3.65s/it] 60%|█████▉ | 311/520 [19:59<12:41, 3.65s/it] {'loss': 1.1373, 'grad_norm': 0.0012640697595190907, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:59<12:41, 3.65s/it] 60%|██████ | 312/520 [20:03<12:39, 3.65s/it] {'loss': 1.1307, 'grad_norm': 0.0013485287521297392, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [20:03<12:39, 3.65s/it] 60%|██████ | 313/520 [20:07<12:36, 3.66s/it] {'loss': 1.1188, 'grad_norm': 0.0011150522098117798, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [20:07<12:36, 3.66s/it] 60%|██████ | 314/520 [20:11<12:56, 3.77s/it] {'loss': 1.1574, 'grad_norm': 0.0011898165922636754, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [20:11<12:56, 3.77s/it] 61%|██████ | 315/520 [20:14<12:44, 3.73s/it] {'loss': 1.2213, 'grad_norm': 0.0017479825651722332, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [20:14<12:44, 3.73s/it] 61%|██████ | 316/520 [20:18<13:04, 3.84s/it] {'loss': 1.1364, 'grad_norm': 0.001387675264565363, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [20:18<13:04, 3.84s/it] 61%|██████ | 317/520 [20:22<12:48, 3.79s/it] {'loss': 1.1539, 'grad_norm': 0.0011287750192282436, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:22<12:48, 3.79s/it] 61%|██████ | 318/520 [20:26<12:37, 3.75s/it] {'loss': 1.26, 'grad_norm': 0.001399875553354766, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:26<12:37, 3.75s/it] 61%|██████▏ | 319/520 [20:30<12:48, 3.82s/it] {'loss': 1.1395, 'grad_norm': 0.0011536815396723907, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:30<12:48, 3.82s/it] 62%|██████▏ | 320/520 [20:33<12:35, 3.78s/it] {'loss': 1.0858, 'grad_norm': 0.0012923733546706996, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:33<12:35, 3.78s/it] 62%|██████▏ | 321/520 [20:37<12:45, 3.85s/it] {'loss': 1.2799, 'grad_norm': 0.0013608684805345107, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:37<12:45, 3.85s/it] 62%|██████▏ | 322/520 [20:41<12:31, 3.79s/it] {'loss': 1.1176, 'grad_norm': 0.0011793872464283559, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:41<12:31, 3.79s/it] 62%|██████▏ | 323/520 [20:45<12:19, 3.75s/it] {'loss': 1.1877, 'grad_norm': 0.0012126904202365396, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:45<12:19, 3.75s/it] 62%|██████▏ | 324/520 [20:48<12:11, 3.73s/it] {'loss': 1.2183, 'grad_norm': 0.0013271659074244253, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:48<12:11, 3.73s/it] 62%|██████▎ | 325/520 [20:52<12:02, 3.70s/it] {'loss': 1.2231, 'grad_norm': 0.0014389589209603834, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:52<12:02, 3.70s/it] 63%|██████▎ | 326/520 [20:56<11:58, 3.70s/it] {'loss': 1.2159, 'grad_norm': 0.0012991076577342356, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:56<11:58, 3.70s/it] 63%|██████▎ | 
327/520 [20:59<11:55, 3.71s/it] {'loss': 1.2295, 'grad_norm': 0.00129386057728906, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:59<11:55, 3.71s/it] 63%|██████▎ | 328/520 [21:03<11:51, 3.71s/it] {'loss': 1.2641, 'grad_norm': 0.0012838319497097286, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [21:03<11:51, 3.71s/it] 63%|██████▎ | 329/520 [21:07<11:45, 3.69s/it] {'loss': 1.1406, 'grad_norm': 0.0010923954612839261, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:07<11:45, 3.69s/it] 63%|██████▎ | 330/520 [21:11<11:39, 3.68s/it] {'loss': 1.2098, 'grad_norm': 0.0011522169218869435, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:11<11:39, 3.68s/it] 64%|██████▎ | 331/520 [21:14<11:34, 3.67s/it] {'loss': 1.1702, 'grad_norm': 0.0011845217048209221, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:14<11:34, 3.67s/it] 64%|██████▍ | 332/520 [21:18<11:30, 3.67s/it] {'loss': 1.2569, 'grad_norm': 0.0011726651738857088, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:18<11:30, 3.67s/it] 64%|██████▍ | 333/520 [21:21<11:26, 3.67s/it] {'loss': 1.3103, 'grad_norm': 0.0012905909933159426, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:22<11:26, 3.67s/it] 64%|██████▍ | 334/520 [21:25<11:23, 3.68s/it] {'loss': 1.2215, 'grad_norm': 0.0013142595578413744, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:25<11:23, 3.68s/it] 64%|██████▍ | 335/520 [21:29<11:17, 3.66s/it] {'loss': 1.2209, 'grad_norm': 0.0011397701106544808, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:29<11:17, 3.66s/it] 65%|██████▍ | 336/520 [21:33<11:21, 3.71s/it] {'loss': 1.118, 'grad_norm': 0.0013623314403073692, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:33<11:21, 3.71s/it] 65%|██████▍ | 337/520 [21:37<11:27, 3.76s/it] {'loss': 1.104, 'grad_norm': 0.0011864836078339174, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:37<11:27, 3.76s/it] 65%|██████▌ | 338/520 [21:40<11:29, 3.79s/it] {'loss': 1.2196, 'grad_norm': 0.0012588394203135406, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:40<11:29, 3.79s/it] 65%|██████▌ | 339/520 [21:44<11:30, 3.82s/it] {'loss': 1.1659, 'grad_norm': 0.0012010964532996687, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:44<11:30, 3.82s/it] 65%|██████▌ | 340/520 [21:48<11:30, 3.83s/it] {'loss': 1.158, 'grad_norm': 0.0012066239117946168, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:48<11:30, 3.83s/it] 66%|██████▌ | 341/520 [21:52<11:27, 3.84s/it] {'loss': 1.1847, 'grad_norm': 0.0013146135258726883, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:52<11:27, 3.84s/it] 66%|██████▌ | 342/520 [21:56<11:23, 3.84s/it] {'loss': 1.2244, 'grad_norm': 0.0015372300217290004, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:56<11:23, 3.84s/it] 66%|██████▌ | 343/520 [22:00<11:21, 3.85s/it] {'loss': 1.1786, 'grad_norm': 0.001203376595212531, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [22:00<11:21, 3.85s/it] 66%|██████▌ | 344/520 [22:04<11:17, 3.85s/it] {'loss': 1.1372, 'grad_norm': 0.001208766771058526, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 
344/520 [22:04<11:17, 3.85s/it] 66%|██████▋ | 345/520 [22:07<11:15, 3.86s/it] {'loss': 1.2485, 'grad_norm': 0.0012995501814117788, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [22:07<11:15, 3.86s/it] 67%|██████▋ | 346/520 [22:11<11:14, 3.87s/it] {'loss': 1.1969, 'grad_norm': 0.0011982188136740039, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:11<11:14, 3.87s/it] 67%|██████▋ | 347/520 [22:15<11:07, 3.86s/it] {'loss': 1.1502, 'grad_norm': 0.0011561826018852486, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:15<11:07, 3.86s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:19<11:03, 3.86s/it] {'loss': 1.1126, 'grad_norm': 0.0015677385205809938, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:19<11:03, 3.86s/it] 67%|██████▋ | 349/520 [22:23<11:00, 3.86s/it] {'loss': 1.1497, 'grad_norm': 0.0012069504346907947, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:23<11:00, 3.86s/it] 67%|██████▋ | 350/520 [22:27<10:55, 3.86s/it] {'loss': 1.1928, 'grad_norm': 0.0013390834642029077, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:27<10:55, 3.86s/it] 68%|██████▊ | 351/520 [22:31<10:53, 3.87s/it] {'loss': 1.1001, 'grad_norm': 0.0011371406053730492, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:31<10:53, 3.87s/it] 68%|██████▊ | 352/520 [22:34<10:38, 3.80s/it] {'loss': 1.2254, 'grad_norm': 0.0012247983379765298, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:34<10:38, 3.80s/it] 68%|██████▊ | 353/520 [22:38<10:28, 3.76s/it] {'loss': 1.1494, 'grad_norm': 0.0010150010662664448, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:38<10:28, 3.76s/it] 68%|██████▊ | 354/520 [22:42<10:20, 3.74s/it] {'loss': 1.2668, 'grad_norm': 0.0011249716660181354, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:42<10:20, 3.74s/it] 68%|██████▊ | 355/520 [22:45<10:14, 3.72s/it] {'loss': 1.1612, 'grad_norm': 0.0012120240298871755, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:45<10:14, 3.72s/it] 68%|██████▊ | 356/520 [22:49<10:09, 3.72s/it] {'loss': 1.1653, 'grad_norm': 0.001244130567817561, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:49<10:09, 3.72s/it] 69%|██████▊ | 357/520 [22:53<10:05, 3.71s/it] {'loss': 1.1923, 'grad_norm': 0.0011768526946325623, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:53<10:05, 3.71s/it] 69%|██████▉ | 358/520 [22:56<09:58, 3.69s/it] {'loss': 1.1271, 'grad_norm': 0.0011822716646692263, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:56<09:58, 3.69s/it] 69%|██████▉ | 359/520 [23:00<09:53, 3.69s/it] {'loss': 1.2, 'grad_norm': 0.0012160136178244628, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [23:00<09:53, 3.69s/it] 69%|██████▉ | 360/520 [23:04<09:48, 3.68s/it] {'loss': 1.2081, 'grad_norm': 0.0012297742471092767, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [23:04<09:48, 3.68s/it] 69%|██████▉ | 361/520 [23:07<09:43, 3.67s/it] {'loss': 1.2156, 'grad_norm': 0.0011687658126504864, 'learning_rate': 0.04522281607821288, 
'epoch': 0.69} + 69%|██████▉ | 361/520 [23:07<09:43, 3.67s/it] 70%|██████▉ | 362/520 [23:11<09:41, 3.68s/it] {'loss': 1.1789, 'grad_norm': 0.0012966766859940585, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:11<09:41, 3.68s/it] 70%|██████▉ | 363/520 [23:15<09:37, 3.68s/it] {'loss': 1.202, 'grad_norm': 0.0012146156968305837, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:15<09:37, 3.68s/it] 70%|███████ | 364/520 [23:18<09:33, 3.68s/it] {'loss': 1.239, 'grad_norm': 0.0011999828466672119, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [23:18<09:33, 3.68s/it] 70%|███████ | 365/520 [23:22<09:29, 3.67s/it] {'loss': 1.259, 'grad_norm': 0.0012862269717501893, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:22<09:29, 3.67s/it] 70%|███████ | 366/520 [23:26<09:25, 3.67s/it] {'loss': 1.2162, 'grad_norm': 0.0012512418188069564, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:26<09:25, 3.67s/it] 71%|███████ | 367/520 [23:29<09:21, 3.67s/it] {'loss': 1.2168, 'grad_norm': 0.0012084716267041357, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:29<09:21, 3.67s/it] 71%|███████ | 368/520 [23:33<09:16, 3.66s/it] {'loss': 1.07, 'grad_norm': 0.0013457693252492246, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:33<09:16, 3.66s/it] 71%|███████ | 369/520 [23:37<09:19, 3.71s/it] {'loss': 1.1918, 'grad_norm': 0.0010777703696531625, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:37<09:19, 3.71s/it] 71%|███████ | 370/520 [23:41<09:16, 3.71s/it] {'loss': 1.1303, 'grad_norm': 0.0011744057632709283, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:41<09:16, 3.71s/it] 71%|███████▏ | 371/520 [23:44<09:10, 3.69s/it] {'loss': 1.1299, 'grad_norm': 0.0012558635945150727, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:44<09:10, 3.69s/it] 72%|███████▏ | 372/520 [23:48<09:05, 3.68s/it] {'loss': 1.2682, 'grad_norm': 0.0011524217612096676, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:48<09:05, 3.68s/it] 72%|███████▏ | 373/520 [23:52<09:01, 3.68s/it] {'loss': 1.1555, 'grad_norm': 0.001285855623908215, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:52<09:01, 3.68s/it] 72%|███████▏ | 374/520 [23:55<08:57, 3.68s/it] {'loss': 1.2146, 'grad_norm': 0.0011981254473672049, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:55<08:57, 3.68s/it] 72%|███████▏ | 375/520 [23:59<08:55, 3.69s/it] {'loss': 1.134, 'grad_norm': 0.001215545840952741, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:59<08:55, 3.69s/it] 72%|███████▏ | 376/520 [24:03<08:53, 3.70s/it] {'loss': 1.2461, 'grad_norm': 0.00115832692553684, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [24:03<08:53, 3.70s/it] 72%|███████▎ | 377/520 [24:06<08:48, 3.70s/it] {'loss': 1.1805, 'grad_norm': 0.0012198147181989858, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [24:06<08:48, 3.70s/it] 73%|███████▎ | 378/520 [24:10<08:45, 3.70s/it] {'loss': 1.2378, 'grad_norm': 0.001215038357269985, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [24:10<08:45, 3.70s/it] 73%|███████▎ | 379/520 [24:14<08:40, 3.69s/it] {'loss': 1.2144, 'grad_norm': 
0.0011533436709522508, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:14<08:40, 3.69s/it] 73%|███████▎ | 380/520 [24:17<08:36, 3.69s/it] {'loss': 1.2401, 'grad_norm': 0.0012337779846817829, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:17<08:36, 3.69s/it] 73%|███████▎ | 381/520 [24:21<08:35, 3.71s/it] {'loss': 1.2204, 'grad_norm': 0.001195872811894673, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:21<08:35, 3.71s/it] 73%|███████▎ | 382/520 [24:25<08:39, 3.77s/it] {'loss': 1.2066, 'grad_norm': 0.0011578753618322168, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:25<08:39, 3.77s/it] 74%|███████▎ | 383/520 [24:29<08:37, 3.78s/it] {'loss': 1.0527, 'grad_norm': 0.0013053965570802286, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:29<08:37, 3.78s/it] 74%|███████▍ | 384/520 [24:33<08:30, 3.76s/it] {'loss': 1.2514, 'grad_norm': 0.0011277610413480245, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:33<08:30, 3.76s/it] 74%|███████▍ | 385/520 [24:36<08:22, 3.72s/it] {'loss': 1.1929, 'grad_norm': 0.0011076750304390322, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:36<08:22, 3.72s/it] 74%|███████▍ | 386/520 [24:40<08:15, 3.70s/it] {'loss': 1.1484, 'grad_norm': 0.001038499678422457, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:40<08:15, 3.70s/it] 74%|███████▍ | 387/520 [24:44<08:10, 3.69s/it] {'loss': 1.2676, 'grad_norm': 0.0011666536188537852, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:44<08:10, 3.69s/it] 75%|███████▍ | 388/520 [24:47<08:06, 3.69s/it] {'loss': 1.102, 'grad_norm': 0.0011348447154332015, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:47<08:06, 3.69s/it] 75%|███████▍ | 389/520 [24:51<08:02, 3.68s/it] {'loss': 1.1489, 'grad_norm': 0.0016325626206075375, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:51<08:02, 3.68s/it] 75%|███████▌ | 390/520 [24:55<07:59, 3.69s/it] {'loss': 1.2138, 'grad_norm': 0.001118010885154056, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:55<07:59, 3.69s/it] 75%|███████▌ | 391/520 [24:58<07:54, 3.67s/it] {'loss': 1.2881, 'grad_norm': 0.0012561766634655613, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:58<07:54, 3.67s/it] 75%|███████▌ | 392/520 [25:02<07:50, 3.68s/it] {'loss': 1.1064, 'grad_norm': 0.001262651216482727, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [25:02<07:50, 3.68s/it] 76%|███████▌ | 393/520 [25:06<07:44, 3.66s/it] {'loss': 1.1081, 'grad_norm': 0.00102519753750807, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [25:06<07:44, 3.66s/it] 76%|███████▌ | 394/520 [25:09<07:39, 3.65s/it] {'loss': 1.1688, 'grad_norm': 0.0012646144160260563, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:09<07:39, 3.65s/it] 76%|███████▌ | 395/520 [25:13<07:36, 3.65s/it] {'loss': 1.1364, 'grad_norm': 0.00129921163690221, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:13<07:36, 3.65s/it] 76%|███████▌ | 396/520 [25:16<07:31, 3.64s/it] {'loss': 1.2155, 'grad_norm': 0.0012995883656247832, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:16<07:31, 
3.64s/it] 76%|███████▋ | 397/520 [25:20<07:28, 3.65s/it] {'loss': 1.1932, 'grad_norm': 0.0011406055078620702, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:20<07:28, 3.65s/it] 77%|███████▋ | 398/520 [25:24<07:24, 3.64s/it] {'loss': 1.1973, 'grad_norm': 0.0012447640421334844, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:24<07:24, 3.64s/it] 77%|███████▋ | 399/520 [25:27<07:20, 3.64s/it] {'loss': 1.1503, 'grad_norm': 0.0012166407711266165, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:27<07:20, 3.64s/it] 77%|███████▋ | 400/520 [25:31<07:17, 3.64s/it] {'loss': 1.1839, 'grad_norm': 0.0010980196046065204, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:31<07:17, 3.64s/it] 77%|███████▋ | 401/520 [25:35<07:13, 3.64s/it] {'loss': 1.0311, 'grad_norm': 0.0013615235615838542, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:35<07:13, 3.64s/it] 77%|███████▋ | 402/520 [25:38<07:10, 3.65s/it] {'loss': 1.1511, 'grad_norm': 0.0012205538513932839, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:38<07:10, 3.65s/it] 78%|███████▊ | 403/520 [25:42<07:07, 3.65s/it] {'loss': 1.1764, 'grad_norm': 0.0013197185396603588, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:42<07:07, 3.65s/it] 78%|███████▊ | 404/520 [25:46<07:02, 3.64s/it] {'loss': 1.081, 'grad_norm': 0.0014530930528940179, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:46<07:02, 3.64s/it] 78%|███████▊ | 405/520 [25:49<06:58, 3.64s/it] {'loss': 1.1628, 'grad_norm': 0.0012173752664432842, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:49<06:58, 3.64s/it] 78%|███████▊ | 406/520 [25:53<07:05, 3.73s/it] {'loss': 1.0799, 'grad_norm': 0.0015723972851095014, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:53<07:05, 3.73s/it] 78%|███████▊ | 407/520 [25:57<07:06, 3.77s/it] {'loss': 1.2592, 'grad_norm': 0.001213050858900648, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:57<07:06, 3.77s/it] 78%|███████▊ | 408/520 [26:01<07:06, 3.80s/it] {'loss': 1.1681, 'grad_norm': 0.001415183816792439, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [26:01<07:06, 3.80s/it] 79%|███████▊ | 409/520 [26:05<07:05, 3.83s/it] {'loss': 1.2823, 'grad_norm': 0.0013296115433390023, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [26:05<07:05, 3.83s/it] 79%|███████▉ | 410/520 [26:09<07:02, 3.84s/it] {'loss': 1.0177, 'grad_norm': 0.0012537107819121389, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:09<07:02, 3.84s/it] 79%|███████▉ | 411/520 [26:13<06:59, 3.84s/it] {'loss': 1.2661, 'grad_norm': 0.00145065608374362, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:13<06:59, 3.84s/it] 79%|███████▉ | 412/520 [26:16<06:49, 3.79s/it] {'loss': 1.1731, 'grad_norm': 0.001224336845349486, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:16<06:49, 3.79s/it] 79%|███████▉ | 413/520 [26:20<06:43, 3.77s/it] {'loss': 1.1734, 'grad_norm': 0.0011230474334117501, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:20<06:43, 3.77s/it] 80%|███████▉ | 414/520 [26:24<06:36, 3.74s/it] {'loss': 0.9845, 'grad_norm': 0.001017112805511303, 
'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:24<06:36, 3.74s/it] 80%|███████▉ | 415/520 [26:27<06:32, 3.74s/it] {'loss': 1.1523, 'grad_norm': 0.0011408949107052616, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:27<06:32, 3.74s/it] 80%|████████ | 416/520 [26:31<06:26, 3.72s/it] {'loss': 1.0693, 'grad_norm': 0.0012739592149685738, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:31<06:26, 3.72s/it] 80%|████████ | 417/520 [26:35<06:21, 3.70s/it] {'loss': 1.2295, 'grad_norm': 0.0013176358672238112, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:35<06:21, 3.70s/it] 80%|████████ | 418/520 [26:38<06:16, 3.69s/it] {'loss': 1.2171, 'grad_norm': 0.0012288084267896185, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:38<06:16, 3.69s/it] 81%|████████ | 419/520 [26:42<06:11, 3.68s/it] {'loss': 1.2086, 'grad_norm': 0.0013306981104163342, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:42<06:11, 3.68s/it] 81%|████████ | 420/520 [26:46<06:10, 3.70s/it] {'loss': 1.0989, 'grad_norm': 0.0012723284592228812, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:46<06:10, 3.70s/it] 81%|████████ | 421/520 [26:50<06:09, 3.73s/it] {'loss': 1.0372, 'grad_norm': 0.0014773704771983845, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:50<06:09, 3.73s/it] 81%|████████ | 422/520 [26:53<06:08, 3.76s/it] {'loss': 1.1553, 'grad_norm': 0.0012280394297226986, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:53<06:08, 3.76s/it] 81%|████████▏ | 423/520 [26:57<06:03, 3.75s/it] {'loss': 1.1361, 'grad_norm': 0.001319282489352836, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:57<06:03, 3.75s/it] 82%|████████▏ | 424/520 [27:01<06:03, 3.79s/it] {'loss': 1.2581, 'grad_norm': 0.0012210344298364758, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [27:01<06:03, 3.79s/it] 82%|████████▏ | 425/520 [27:05<06:02, 3.82s/it] {'loss': 1.1491, 'grad_norm': 0.001184231169749162, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:05<06:02, 3.82s/it] 82%|████████▏ | 426/520 [27:09<05:59, 3.83s/it] {'loss': 1.1616, 'grad_norm': 0.001513379443568953, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:09<05:59, 3.83s/it] 82%|████████▏ | 427/520 [27:13<05:57, 3.84s/it] {'loss': 1.0829, 'grad_norm': 0.0011370754789940503, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:13<05:57, 3.84s/it] 82%|████████▏ | 428/520 [27:16<05:54, 3.85s/it] {'loss': 1.0624, 'grad_norm': 0.001237005275980239, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:16<05:54, 3.85s/it] 82%|████████▎ | 429/520 [27:20<05:52, 3.87s/it] {'loss': 1.1574, 'grad_norm': 0.0012039965897817327, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:20<05:52, 3.87s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:24<05:49, 3.89s/it] {'loss': 1.1613, 'grad_norm': 0.0011177455330359234, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:24<05:49, 3.89s/it] 83%|████████▎ | 431/520 [27:28<05:47, 3.90s/it] {'loss': 1.1415, 'grad_norm': 0.0013110854270587146, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:28<05:47, 3.90s/it] 83%|████████▎ | 432/520 [27:32<05:42, 3.90s/it] {'loss': 1.0711, 'grad_norm': 0.0012204881536762137, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:32<05:42, 3.90s/it] 83%|████████▎ | 433/520 [27:36<05:39, 3.90s/it] {'loss': 1.2002, 'grad_norm': 0.0012008658798772166, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:36<05:39, 3.90s/it] 83%|████████▎ | 434/520 [27:40<05:34, 3.89s/it] {'loss': 0.9525, 'grad_norm': 0.0011898910209320868, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:40<05:34, 3.89s/it] 84%|████████▎ | 435/520 [27:44<05:30, 3.89s/it] {'loss': 1.2399, 'grad_norm': 0.0013449434046260854, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:44<05:30, 3.89s/it] 84%|████████▍ | 436/520 [27:48<05:23, 3.85s/it] {'loss': 1.0408, 'grad_norm': 0.0012313658942029746, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:48<05:23, 3.85s/it] 84%|████████▍ | 437/520 [27:51<05:15, 3.81s/it] {'loss': 1.2578, 'grad_norm': 0.0012248359786765418, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:51<05:15, 3.81s/it] 84%|████████▍ | 438/520 [27:55<05:07, 3.75s/it] {'loss': 1.0805, 'grad_norm': 0.001176845217643145, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:55<05:07, 3.75s/it] 84%|████████▍ | 439/520 [27:59<05:01, 3.72s/it] {'loss': 1.1274, 'grad_norm': 0.0010179194169584937, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:59<05:01, 3.72s/it] 85%|████████▍ | 440/520 [28:02<04:55, 3.69s/it] {'loss': 1.1111, 'grad_norm': 0.0012045262106318026, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [28:02<04:55, 3.69s/it] 85%|████████▍ | 441/520 [28:06<04:52, 3.71s/it] {'loss': 1.1384, 'grad_norm': 0.0011520626012599905, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [28:06<04:52, 3.71s/it] 85%|████████▌ | 442/520 [28:10<04:51, 3.74s/it] {'loss': 1.174, 'grad_norm': 0.0013274176002812115, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:10<04:51, 3.74s/it] 85%|████████▌ | 443/520 [28:14<04:49, 3.77s/it] {'loss': 1.1898, 'grad_norm': 0.0011683895423794065, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:14<04:49, 3.77s/it] 85%|████████▌ | 444/520 [28:17<04:47, 3.78s/it] {'loss': 1.1527, 'grad_norm': 0.001121381785167768, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:17<04:47, 3.78s/it] 86%|████████▌ | 445/520 [28:21<04:40, 3.74s/it] {'loss': 1.0852, 'grad_norm': 0.0011557675106591344, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:21<04:40, 3.74s/it] 86%|████████▌ | 446/520 [28:25<04:34, 3.70s/it] {'loss': 1.2192, 'grad_norm': 0.001138157873710027, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:25<04:34, 3.70s/it] 
86%|████████▌ | 447/520 [28:28<04:30, 3.71s/it] {'loss': 1.1641, 'grad_norm': 0.0012013586009245797, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:28<04:30, 3.71s/it] 86%|████████▌ | 448/520 [28:32<04:25, 3.69s/it] {'loss': 1.1517, 'grad_norm': 0.0012346159228635524, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:32<04:25, 3.69s/it] 86%|████████▋ | 449/520 [28:36<04:21, 3.69s/it] {'loss': 1.1751, 'grad_norm': 0.0012326994354032966, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:36<04:21, 3.69s/it] 87%|████████▋ | 450/520 [28:39<04:17, 3.67s/it] {'loss': 1.1815, 'grad_norm': 0.001207947269954956, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:39<04:17, 3.67s/it] 87%|████████▋ | 451/520 [28:43<04:12, 3.67s/it] {'loss': 1.178, 'grad_norm': 0.0011933737647986365, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:43<04:12, 3.67s/it] 87%|████████▋ | 452/520 [28:47<04:08, 3.65s/it] {'loss': 1.2174, 'grad_norm': 0.0011134906067249248, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:47<04:08, 3.65s/it] 87%|████████▋ | 453/520 [28:50<04:04, 3.65s/it] {'loss': 1.1925, 'grad_norm': 0.0011616540212692193, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:50<04:04, 3.65s/it] 87%|████████▋ | 454/520 [28:54<04:01, 3.66s/it] {'loss': 1.0909, 'grad_norm': 0.0012617052820651912, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:54<04:01, 3.66s/it] 88%|████████▊ | 455/520 [28:58<03:57, 3.65s/it] {'loss': 1.2272, 'grad_norm': 0.001164231106213407, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:58<03:57, 3.65s/it] 88%|████████▊ | 456/520 [29:01<03:53, 3.65s/it] {'loss': 1.1535, 'grad_norm': 0.0012055768016460517, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [29:01<03:53, 3.65s/it] 88%|████████▊ | 457/520 [29:05<03:50, 3.66s/it] {'loss': 1.1042, 'grad_norm': 0.0010479732031645342, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [29:05<03:50, 3.66s/it] 88%|████████▊ | 458/520 [29:09<03:46, 3.65s/it] {'loss': 1.2832, 'grad_norm': 0.0012763137733040665, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:09<03:46, 3.65s/it] 88%|████████▊ | 459/520 [29:12<03:43, 3.67s/it] {'loss': 1.214, 'grad_norm': 0.0013265425272431478, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:12<03:43, 3.67s/it] 88%|████████▊ | 460/520 [29:16<03:39, 3.66s/it] {'loss': 1.1033, 'grad_norm': 0.0011759560587945512, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:16<03:39, 3.66s/it] 89%|████████▊ | 461/520 [29:20<03:36, 3.66s/it] {'loss': 1.1834, 'grad_norm': 0.0009624228179114103, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:20<03:36, 3.66s/it] 89%|████████▉ | 462/520 [29:23<03:32, 3.66s/it] {'loss': 1.2638, 'grad_norm': 0.0011380491536792999, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:23<03:32, 3.66s/it] 89%|████████▉ | 463/520 [29:27<03:27, 3.64s/it] {'loss': 1.0573, 'grad_norm': 0.0012616295790976529, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:27<03:27, 3.64s/it] 89%|████████▉ | 464/520 [29:30<03:24, 3.66s/it] {'loss': 1.1963, 
'grad_norm': 0.0013143437092784504, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:30<03:24, 3.66s/it] 89%|████████▉ | 465/520 [29:34<03:23, 3.70s/it] {'loss': 1.3022, 'grad_norm': 0.0012842773402516, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:34<03:23, 3.70s/it] 90%|████████▉ | 466/520 [29:38<03:18, 3.68s/it] {'loss': 1.187, 'grad_norm': 0.0011223212041579188, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:38<03:18, 3.68s/it] 90%|████████▉ | 467/520 [29:42<03:14, 3.67s/it] {'loss': 1.1511, 'grad_norm': 0.001093066630162827, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:42<03:14, 3.67s/it] 90%|█████████ | 468/520 [29:45<03:10, 3.66s/it] {'loss': 1.1623, 'grad_norm': 0.0013227174651165188, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:45<03:10, 3.66s/it] 90%|█████████ | 469/520 [29:49<03:06, 3.65s/it] {'loss': 1.2222, 'grad_norm': 0.0013576709885438005, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:49<03:06, 3.65s/it] 90%|█████████ | 470/520 [29:52<03:02, 3.65s/it] {'loss': 1.1039, 'grad_norm': 0.0011140006218108153, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:52<03:02, 3.65s/it] 91%|█████████ | 471/520 [29:56<02:58, 3.65s/it] {'loss': 1.1286, 'grad_norm': 0.0012800502117537567, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:56<02:58, 3.65s/it] 91%|█████████ | 472/520 [30:00<02:55, 3.66s/it] {'loss': 1.0899, 'grad_norm': 0.0012018193581579068, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [30:00<02:55, 3.66s/it] 91%|█████████ | 473/520 [30:03<02:51, 3.66s/it] {'loss': 1.1567, 'grad_norm': 0.0012822754498592335, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [30:03<02:51, 3.66s/it] 91%|█████████ | 474/520 [30:07<02:47, 3.65s/it] {'loss': 1.1899, 'grad_norm': 0.0011393892658898388, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:07<02:47, 3.65s/it] 91%|█████████▏| 475/520 [30:11<02:44, 3.66s/it] {'loss': 1.1098, 'grad_norm': 0.001117367332063269, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:11<02:44, 3.66s/it] 92%|█████████▏| 476/520 [30:14<02:40, 3.66s/it] {'loss': 1.1466, 'grad_norm': 0.0012379285964688937, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:14<02:40, 3.66s/it] 92%|█████████▏| 477/520 [30:18<02:36, 3.65s/it] {'loss': 1.1385, 'grad_norm': 0.0013074378663505072, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:18<02:36, 3.65s/it] 92%|█████████▏| 478/520 [30:22<02:33, 3.65s/it] {'loss': 1.0899, 'grad_norm': 0.0012677456560397486, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:22<02:33, 3.65s/it] 92%|█████████▏| 479/520 [30:25<02:29, 3.65s/it] {'loss': 1.1528, 'grad_norm': 0.001321416250564396, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:25<02:29, 3.65s/it] 92%|█████████▏| 480/520 [30:29<02:25, 3.64s/it] {'loss': 1.1737, 'grad_norm': 0.0011003142020289522, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:29<02:25, 3.64s/it] 92%|█████████▎| 481/520 [30:33<02:22, 3.66s/it] {'loss': 1.1648, 'grad_norm': 0.001113358054218087, 'learning_rate': 0.0029403432546609046, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:33<02:22, 3.66s/it] 93%|█████████▎| 482/520 [30:36<02:19, 3.66s/it] {'loss': 1.1845, 'grad_norm': 0.001183129140877873, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:36<02:19, 3.66s/it] 93%|█████████▎| 483/520 [30:40<02:14, 3.65s/it] {'loss': 1.1572, 'grad_norm': 0.0012713455519576605, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:40<02:14, 3.65s/it] 93%|█████████▎| 484/520 [30:44<02:11, 3.65s/it] {'loss': 1.1625, 'grad_norm': 0.00123364411424534, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:44<02:11, 3.65s/it] 93%|█████████▎| 485/520 [30:47<02:10, 3.72s/it] {'loss': 1.1189, 'grad_norm': 0.001177608282948492, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:47<02:10, 3.72s/it] 93%|█████████▎| 486/520 [30:51<02:07, 3.75s/it] {'loss': 1.2393, 'grad_norm': 0.00127711835052466, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:51<02:07, 3.75s/it] 94%|█████████▎| 487/520 [30:55<02:04, 3.78s/it] {'loss': 1.0932, 'grad_norm': 0.001204107995244534, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:55<02:04, 3.78s/it] 94%|█████████▍| 488/520 [30:59<02:01, 3.80s/it] {'loss': 1.0393, 'grad_norm': 0.0013214319091628536, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:59<02:01, 3.80s/it] 94%|█████████▍| 489/520 [31:03<01:58, 3.83s/it] {'loss': 1.1864, 'grad_norm': 0.001027958282137469, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [31:03<01:58, 3.83s/it] 94%|█████████▍| 490/520 [31:07<01:54, 3.83s/it] {'loss': 1.1582, 'grad_norm': 0.001212584445938694, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:07<01:54, 3.83s/it] 94%|█████████▍| 491/520 [31:11<01:51, 3.85s/it] {'loss': 1.1223, 'grad_norm': 0.0012385013592592692, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:11<01:51, 3.85s/it] 95%|█████████▍| 492/520 [31:15<01:48, 3.88s/it] {'loss': 1.2325, 'grad_norm': 0.0012552940384244445, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:15<01:48, 3.88s/it] 95%|█████████▍| 493/520 [31:18<01:44, 3.87s/it] {'loss': 1.185, 'grad_norm': 0.0012192679542744847, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:18<01:44, 3.87s/it] 95%|█████████▌| 494/520 [31:22<01:40, 3.87s/it] {'loss': 1.1751, 'grad_norm': 0.001106914640527858, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:22<01:40, 3.87s/it] 95%|█████████▌| 495/520 [31:26<01:36, 3.86s/it] {'loss': 1.1409, 'grad_norm': 0.0012372857294540673, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:26<01:36, 3.86s/it] 95%|█████████▌| 496/520 [31:30<01:32, 3.86s/it] {'loss': 1.0624, 'grad_norm': 0.0012194895632541031, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:30<01:32, 3.86s/it] 96%|█████████▌| 497/520 [31:34<01:28, 3.84s/it] {'loss': 1.1173, 'grad_norm': 0.0010358918358007332, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:34<01:28, 3.84s/it] 96%|█████████▌| 498/520 [31:37<01:22, 3.77s/it] {'loss': 1.1371, 'grad_norm': 0.0012011987916705684, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:37<01:22, 3.77s/it] 
96%|█████████▌| 499/520 [31:41<01:18, 3.74s/it] {'loss': 1.2515, 'grad_norm': 0.001299459358845071, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:41<01:18, 3.74s/it] 96%|█████████▌| 500/520 [31:45<01:13, 3.70s/it] {'loss': 1.2552, 'grad_norm': 0.0013760892517260387, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:45<01:13, 3.70s/it] 96%|█████████▋| 501/520 [31:48<01:09, 3.68s/it] {'loss': 1.1588, 'grad_norm': 0.0012984909734544286, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:48<01:09, 3.68s/it] 97%|█████████▋| 502/520 [31:52<01:06, 3.69s/it] {'loss': 1.1755, 'grad_norm': 0.0011255098876257058, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:52<01:06, 3.69s/it] 97%|█████████▋| 503/520 [31:56<01:02, 3.68s/it] {'loss': 1.1506, 'grad_norm': 0.001218566584688749, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:56<01:02, 3.68s/it] 97%|█████████▋| 504/520 [31:59<00:58, 3.66s/it] {'loss': 1.167, 'grad_norm': 0.0014088602336044431, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:59<00:58, 3.66s/it] 97%|█████████▋| 505/520 [32:03<00:54, 3.65s/it] {'loss': 1.1982, 'grad_norm': 0.0012691497248734384, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [32:03<00:54, 3.65s/it] 97%|█████████▋| 506/520 [32:06<00:50, 3.64s/it] {'loss': 1.1283, 'grad_norm': 0.001237941996509028, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [32:07<00:50, 3.64s/it] 98%|█████████▊| 507/520 [32:10<00:47, 3.64s/it] {'loss': 1.2937, 'grad_norm': 0.0011210303633781367, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:10<00:47, 3.64s/it] 98%|█████████▊| 508/520 [32:14<00:43, 3.64s/it] {'loss': 1.2425, 'grad_norm': 0.0012482141709411379, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:14<00:43, 3.64s/it] 98%|█████████▊| 509/520 [32:17<00:39, 3.63s/it] {'loss': 1.2153, 'grad_norm': 0.0011782607750719567, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:17<00:39, 3.63s/it] 98%|█████████▊| 510/520 [32:21<00:36, 3.65s/it] {'loss': 1.1639, 'grad_norm': 0.0011837452531439896, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:21<00:36, 3.65s/it] 98%|█████████▊| 511/520 [32:25<00:33, 3.71s/it] {'loss': 1.1407, 'grad_norm': 0.0011618147639453314, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:25<00:33, 3.71s/it] 98%|█████████▊| 512/520 [32:29<00:29, 3.75s/it] {'loss': 1.0256, 'grad_norm': 0.0013368606605531944, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:29<00:29, 3.75s/it] 99%|█████████▊| 513/520 [32:33<00:26, 3.78s/it] {'loss': 1.221, 'grad_norm': 0.001349600614007242, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:33<00:26, 3.78s/it] 99%|█████████▉| 514/520 [32:36<00:22, 3.80s/it] {'loss': 1.1917, 'grad_norm': 0.001109577295228624, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:36<00:22, 3.80s/it] 99%|█████████▉| 515/520 [32:40<00:19, 3.81s/it] {'loss': 1.2375, 'grad_norm': 0.0013953159224155882, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:40<00:19, 3.81s/it] 99%|█████████▉| 516/520 [32:44<00:15, 3.82s/it] {'loss': 
1.144, 'grad_norm': 0.0011613033956770949, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:44<00:15, 3.82s/it] 99%|█████████▉| 517/520 [32:48<00:11, 3.81s/it] {'loss': 1.1862, 'grad_norm': 0.00113738577180332, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:48<00:11, 3.81s/it] 100%|█████████▉| 518/520 [32:52<00:07, 3.80s/it] {'loss': 1.158, 'grad_norm': 0.0012258764438845182, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:52<00:07, 3.80s/it] 100%|█████████▉| 519/520 [32:56<00:03, 3.80s/it] {'loss': 1.1555, 'grad_norm': 0.0011666836072316974, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:56<00:03, 3.80s/it] 100%|██████████| 520/520 [33:00<00:00, 4.07s/it] {'loss': 1.1586, 'grad_norm': 0.0011320548499614315, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [33:00<00:00, 4.07s/it] {'train_runtime': 1980.7326, 'train_samples_per_second': 33.588, 'train_steps_per_second': 0.263, 'train_loss': 1.2493171043120899, 'epoch': 1.0} + 100%|██████████| 520/520 [33:00<00:00, 4.07s/it] 100%|██████████| 520/520 [33:00<00:00, 3.81s/it] +[2025-10-13 21:30:29,363] [INFO] [launch.py:348:main] Process 1047398 exits successfully. +[2025-10-13 21:30:29,364] [INFO] [launch.py:348:main] Process 1047397 exits successfully. +[2025-10-13 21:30:30,365] [INFO] [launch.py:348:main] Process 1047403 exits successfully. +[2025-10-13 21:30:30,366] [INFO] [launch.py:348:main] Process 1047400 exits successfully. +[2025-10-13 21:30:30,366] [INFO] [launch.py:348:main] Process 1047399 exits successfully. +[2025-10-13 21:30:30,366] [INFO] [launch.py:348:main] Process 1047401 exits successfully. +[2025-10-13 21:30:30,367] [INFO] [launch.py:348:main] Process 1047402 exits successfully. +[2025-10-13 21:30:34,371] [INFO] [launch.py:348:main] Process 1047396 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.3_2e-1_connector-5.0_2.3_2e-1_ablation_20251013_205557.log +Timestamp: 2025-10-13 21:30:36 +===================================== diff --git a/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation_20251013_213037.log b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation_20251013_213037.log new file mode 100644 index 0000000000000000000000000000000000000000..2db00d5a6bdfc6885dcd7a741b48d0a0f3e6f784 --- /dev/null +++ b/logs_oct12/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation_20251013_213037.log @@ -0,0 +1,1840 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation_20251013_213037.log +Timestamp: 2025-10-13 21:30:37 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-13 21:30:39,709] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 21:30:42,753] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-13 21:30:42,755] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 5.0 --temperature_attn_text 2.5 --temperature_mlp_text 2.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 5.0 --temperature_attn_vision 2.5 --temperature_mlp_vision 2.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 5.0 --temperature_connector 2.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-13 21:30:45,351] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-13 21:30:46,412] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-13 21:30:46,412] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-13 21:30:46,412] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-13 21:30:46,412] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-13 21:30:46,412] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-13 21:30:46,412] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-13 21:30:46,412] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-13 21:30:46,415] [INFO] [launch.py:253:main] process 1067145 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,417] [INFO]
[launch.py:253:main] process 1067146 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,419] [INFO] [launch.py:253:main] process 1067147 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,421] [INFO] [launch.py:253:main] process 1067148 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,424] [INFO] [launch.py:253:main] process 1067149 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,426] [INFO] [launch.py:253:main] process 1067150 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,428] [INFO] [launch.py:253:main] process 1067151 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-13 21:30:46,430] [INFO] [launch.py:253:main] process 1067152 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-5.0_2.5_2e-1_connector-5.0_2.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '5.0', '--temperature_attn_text', '2.5', '--temperature_mlp_text', '2.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '5.0', '--temperature_attn_vision', '2.5', '--temperature_mlp_vision', '2.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '5.0', '--temperature_connector', '2.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-13 21:30:53,000] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,204] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,213] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,227] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,284] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,292] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,315] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,315] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-13 21:30:53,438] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,614] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,623] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,623] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-13 21:30:53,635] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,696] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,698] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,725] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-13 21:30:53,725] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 2.5, 'temperature_mlp': 2.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 2.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 2.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 2.5, + "temperature_mlp": 2.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU.
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1067145:1067145 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067145:1067145 [0] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067145:1067145 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067145:1067145 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067145:1067145 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1067145:1067145 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:1067148:1067148 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067148:1067148 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067148:1067148 [3] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067148:1067148 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067148:1067148 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067148:1067148 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1067146:1067146 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067146:1067146 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067146:1067146 [1] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067146:1067146 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067146:1067146 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067146:1067146 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1067152:1067152 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067152:1067152 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067152:1067152 [7] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067149:1067149 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067152:1067152 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067152:1067152 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067152:1067152 [7] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:1067149:1067149 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067149:1067149 [4] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067149:1067149 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067149:1067149 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067149:1067149 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1067147:1067147 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067147:1067147 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067147:1067147 [2] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067147:1067147 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067147:1067147 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067147:1067147 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067151:1067151 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067151:1067151 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067151:1067151 [6] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067151:1067151 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067151:1067151 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067151:1067151 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067150:1067150 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1067150:1067150 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067150:1067150 [5] NCCL INFO Bootstrap : Using eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067150:1067150 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1067150:1067150 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1067150:1067150 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.136.19<0> +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO ncclCommInitRank comm 0x559bab28e5c0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO ncclCommInitRank comm 0x55c18b535a40 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO ncclCommInitRank comm 0x55acd705aac0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO ncclCommInitRank comm 0x5591adee10e0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO ncclCommInitRank comm 0x55b8f1080900 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO ncclCommInitRank comm 0x564f98ccea70 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO ncclCommInitRank comm 0x55c6b31c3520 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO ncclCommInitRank comm 0x562b1de90b00 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xed8d494d36aadff7 - Init START +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO comm 0x55acd705aac0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO comm 0x564f98ccea70 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO comm 0x55b8f1080900 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO comm 0x562b1de90b00 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO comm 0x55c6b31c3520 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO comm 0x559bab28e5c0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO comm 0x55c18b535a40 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO comm 0x5591adee10e0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO P2P 
Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO 
Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL 
INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL 
INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067150:1068736 [5] NCCL INFO ncclCommInitRank comm 0x559bab28e5c0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067152:1068732 [7] NCCL INFO ncclCommInitRank comm 0x562b1de90b00 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067149:1068733 [4] NCCL INFO ncclCommInitRank comm 0x5591adee10e0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067146:1068731 [1] NCCL INFO ncclCommInitRank comm 0x55b8f1080900 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067145:1068729 [0] NCCL INFO ncclCommInitRank comm 0x55c6b31c3520 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067147:1068734 [2] NCCL INFO ncclCommInitRank comm 0x564f98ccea70 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067151:1068735 [6] NCCL INFO ncclCommInitRank comm 0x55c18b535a40 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xed8d494d36aadff7 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1067148:1068730 [3] NCCL INFO ncclCommInitRank comm 0x55acd705aac0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xed8d494d36aadff7 - Init COMPLETE
+[2025-10-13 21:31:39,624] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores',
'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 
'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 
'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 
'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 
'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
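This warning is expected here rather than a sign of a broken checkpoint: the pretrain checkpoint holds only the base Qwen2 projection weights, while the masked model registers an extra per-weight `scores` tensor on every attention and MLP projection (the SupermaskLinearSparsity_SoftForward_Normal modules in the model dump below), and those tensors have no counterpart in the checkpoint, so Transformers initializes them fresh. A minimal sketch of such a layer, assuming a soft sigmoid mask with the run's temperature of 1.3 and a constant score init (which would be consistent with the Mean=5.000000 lines later in this log); the class name, init value, and freezing policy are illustrative, not the repository's actual code:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SupermaskLinear(nn.Linear):
        # nn.Linear plus a learnable per-weight score; the frozen weight is
        # modulated by a soft mask sigmoid(scores / temperature) in forward().
        def __init__(self, in_features, out_features, bias=True, temperature=1.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # Extra state_dict entry ("....scores") that a vanilla Qwen2
            # checkpoint does not contain -- hence the warning above.
            self.scores = nn.Parameter(torch.full_like(self.weight, 5.0))
            self.weight.requires_grad = False  # mask-tuning updates scores only

        def forward(self, x):
            mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
            return F.linear(x, self.weight * mask, self.bias)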
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-13 21:31:41,440] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
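The DeepSpeed line above summarizes the partitioned model as 907 parameter tensors holding roughly 1.42B elements. As a rough cross-check of what those two numbers mean (illustrative, not DeepSpeed's internal accounting), the same quantities can be computed for any torch module:

    def count_parameters(model):
        tensors = list(model.parameters())
        num_params = len(tensors)                    # cf. num_params = 907
        num_elems = sum(p.numel() for p in tensors)  # cf. num_elems = 1.42B
        return num_params, num_elems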
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
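For reference, the mlp2x_gelu connector at the bottom of the dump above maps the 1152-dimensional SigLIP features into the 896-dimensional LLM hidden size through two masked linears with a GELU between them. A sketch in terms of the illustrative SupermaskLinear defined earlier; the score counts in the comments match the connector entries in the trainable-parameter list below:

    connector = nn.Sequential(
        SupermaskLinear(1152, 896),  # scores: 1152 * 896 = 1,032,192
        nn.GELU(),
        SupermaskLinear(896, 896),   # scores:  896 * 896 =   802,816
    )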
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.3.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.9.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: 
Mean=5.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init 
language_model.model.layers.21.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=5.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=5.000000 +Pre-training init connector._connector.0.scores: Mean=5.000005 +Pre-training init connector._connector.2.scores: Mean=4.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-13 21:31:59,761 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-13 21:31:59,766 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 
4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 
parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 
1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL 
INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL 
INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 03/0 : 2[2] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL 
INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL 
INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO Connected all trees 
+ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1067150:1073716 [5] NCCL INFO ncclCommInitRank comm 0x7fcb1006a900 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067152:1073717 [7] NCCL INFO ncclCommInitRank comm 0x7f1eac06a0c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067148:1073715 [3] NCCL INFO ncclCommInitRank comm 0x7f995c06ad50 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067147:1073719 [2] NCCL INFO ncclCommInitRank comm 0x7feab806a410 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067146:1073720 [1] NCCL INFO ncclCommInitRank comm 0x7fce5806a550 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 
0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067149:1073718 [4] NCCL INFO ncclCommInitRank comm 0x7f8d4806acd0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067151:1073721 [6] NCCL INFO ncclCommInitRank comm 0x7f80a406ae80 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x7b048e30308134f5 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1067145:1073714 [0] NCCL INFO ncclCommInitRank comm 0x7ff82006a600 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x7b048e30308134f5 - Init COMPLETE + 0%| | 1/520 [00:14<2:08:43, 14.88s/it] {'loss': 2.2894, 'grad_norm': 0.027400712382118025, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:08:43, 14.88s/it] 0%| | 2/520 [00:18<1:12:15, 8.37s/it] {'loss': 2.2411, 'grad_norm': 0.028357385628300494, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:12:15, 8.37s/it] 1%| | 3/520 [00:22<54:19, 6.31s/it] {'loss': 2.4151, 'grad_norm': 0.03255521266326395, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:22<54:19, 6.31s/it] 1%| | 4/520 [00:26<45:53, 5.34s/it] {'loss': 1.7886, 'grad_norm': 0.011438132573075833, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:26<45:53, 5.34s/it] 1%| | 5/520 [00:30<41:17, 4.81s/it] {'loss': 1.775, 'grad_norm': 0.008083718073102918, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:30<41:17, 4.81s/it] 1%| | 6/520 [00:34<38:33, 4.50s/it] {'loss': 1.5442, 'grad_norm': 0.006676380033946302, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:34<38:33, 4.50s/it] 1%|▏ | 7/520 [00:38<36:40, 4.29s/it] {'loss': 1.5642, 'grad_norm': 0.00923395607877073, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:38<36:40, 4.29s/it] 2%|▏ | 8/520 [00:42<37:04, 4.34s/it] {'loss': 1.5737, 'grad_norm': 0.00638223752827514, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:42<37:04, 4.34s/it] 2%|▏ | 9/520 [00:46<35:32, 4.17s/it] {'loss': 1.5826, 'grad_norm': 0.0035155982506087165, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:46<35:32, 4.17s/it] 2%|▏ | 10/520 [00:49<34:03, 4.01s/it] {'loss': 1.4157, 'grad_norm': 0.0038354059429569643, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<34:03, 4.01s/it] 2%|▏ | 11/520 [00:53<33:17, 3.93s/it] {'loss': 1.4935, 'grad_norm': 0.0047172789896116635, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<33:17, 3.93s/it] 2%|▏ | 12/520 [00:57<32:27, 3.83s/it] {'loss': 1.4156, 'grad_norm': 0.003679359734362824, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<32:27, 3.83s/it][2025-10-13 21:33:06,328] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<33:40, 3.99s/it] {'loss': 1.4261, 'grad_norm': 0.0025718494803094114, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<33:40, 3.99s/it] 3%|▎ | 14/520 [01:05<32:45, 3.88s/it] {'loss': 1.4715, 'grad_norm': 0.003031003267467843, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:05<32:45, 3.88s/it] 3%|▎ | 15/520 [01:08<32:17, 3.84s/it] {'loss': 1.4574, 'grad_norm': 0.0027118695937291258, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:08<32:17, 3.84s/it] 3%|▎ | 16/520 [01:12<32:09, 3.83s/it] {'loss': 1.4114, 'grad_norm': 0.0023922093000966296, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:12<32:09, 3.83s/it] 3%|▎ | 17/520 [01:16<32:01, 3.82s/it] {'loss': 1.4989, 'grad_norm': 0.002347864709510703, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:16<32:01, 3.82s/it] 3%|▎ | 18/520 [01:20<31:32, 3.77s/it] {'loss': 1.3512, 'grad_norm': 0.0022559449862912403, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:20<31:32, 3.77s/it] 4%|▎ | 19/520 [01:23<31:12, 3.74s/it] {'loss': 1.4056, 'grad_norm': 0.0019444410835344446, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:23<31:12, 3.74s/it] 4%|▍ | 20/520 [01:27<30:57, 3.71s/it] {'loss': 1.342, 'grad_norm': 0.0026443183764637856, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:27<30:57, 3.71s/it] 4%|▍ | 21/520 [01:31<30:50, 3.71s/it] {'loss': 1.3938, 'grad_norm': 0.002162291535887953, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:31<30:50, 3.71s/it] 4%|▍ | 22/520 [01:34<30:36, 3.69s/it] {'loss': 1.4944, 'grad_norm': 0.0020492821368419826, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:34<30:36, 3.69s/it] 4%|▍ | 23/520 [01:38<30:29, 3.68s/it] {'loss': 1.4282, 'grad_norm': 0.0017383622461351158, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:38<30:29, 3.68s/it] 5%|▍ | 24/520 [01:42<30:19, 3.67s/it] {'loss': 1.3809, 'grad_norm': 0.001808318409092457, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:42<30:19, 3.67s/it] 5%|▍ | 25/520 [01:45<30:16, 3.67s/it] {'loss': 1.4316, 'grad_norm': 0.0020837538683622548, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:45<30:16, 3.67s/it] 5%|▌ | 26/520 [01:49<30:14, 3.67s/it] {'loss': 1.4139, 'grad_norm': 0.0019112677889922644, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:49<30:14, 3.67s/it] 5%|▌ | 27/520 [01:53<30:09, 3.67s/it] {'loss': 1.3288, 'grad_norm': 0.002076590639233664, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:53<30:09, 3.67s/it] 5%|▌ | 28/520 [01:56<30:03, 3.67s/it] {'loss': 1.333, 'grad_norm': 0.002023547070099091, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:56<30:03, 3.67s/it] 6%|▌ | 29/520 [02:00<30:01, 3.67s/it] {'loss': 1.3577, 'grad_norm': 0.0018183939846493156, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:00<30:01, 3.67s/it] 6%|▌ | 30/520 [02:04<30:01, 3.68s/it] {'loss': 1.4546, 'grad_norm': 0.0016994530819765818, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:04<30:01, 3.68s/it] 6%|▌ | 31/520 [02:07<29:53, 3.67s/it] {'loss': 1.3325, 'grad_norm': 0.0016582482870767382, 'learning_rate': 
0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<29:53, 3.67s/it] 6%|▌ | 32/520 [02:11<29:57, 3.68s/it] {'loss': 1.3104, 'grad_norm': 0.0017144548677373026, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:11<29:57, 3.68s/it] 6%|▋ | 33/520 [02:15<29:50, 3.68s/it] {'loss': 1.3331, 'grad_norm': 0.0016067397495334469, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:15<29:50, 3.68s/it] 7%|▋ | 34/520 [02:18<29:47, 3.68s/it] {'loss': 1.3264, 'grad_norm': 0.001821035305547242, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:18<29:47, 3.68s/it] 7%|▋ | 35/520 [02:22<29:40, 3.67s/it] {'loss': 1.3374, 'grad_norm': 0.0020789310327025594, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:22<29:40, 3.67s/it] 7%|▋ | 36/520 [02:26<29:33, 3.66s/it] {'loss': 1.4372, 'grad_norm': 0.0016775553592407748, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:26<29:33, 3.66s/it] 7%|▋ | 37/520 [02:29<29:27, 3.66s/it] {'loss': 1.4277, 'grad_norm': 0.0016441731628770047, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:29<29:27, 3.66s/it] 7%|▋ | 38/520 [02:33<29:25, 3.66s/it] {'loss': 1.5076, 'grad_norm': 0.0017056176921682144, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:33<29:25, 3.66s/it] 8%|▊ | 39/520 [02:37<29:20, 3.66s/it] {'loss': 1.3551, 'grad_norm': 0.0018251534658046584, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:37<29:20, 3.66s/it] 8%|▊ | 40/520 [02:40<29:16, 3.66s/it] {'loss': 1.3882, 'grad_norm': 0.0016549407615668657, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:40<29:16, 3.66s/it] 8%|▊ | 41/520 [02:44<29:08, 3.65s/it] {'loss': 1.3667, 'grad_norm': 0.001656020953292458, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:44<29:08, 3.65s/it] 8%|▊ | 42/520 [02:48<29:03, 3.65s/it] {'loss': 1.3864, 'grad_norm': 0.0021477809188245683, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:48<29:03, 3.65s/it] 8%|▊ | 43/520 [02:51<29:18, 3.69s/it] {'loss': 1.3242, 'grad_norm': 0.0014807524621064689, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:51<29:18, 3.69s/it] 8%|▊ | 44/520 [02:55<29:33, 3.73s/it] {'loss': 1.4205, 'grad_norm': 0.001571705697038768, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:55<29:33, 3.73s/it] 9%|▊ | 45/520 [02:59<29:39, 3.75s/it] {'loss': 1.3871, 'grad_norm': 0.0017087907334665186, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:59<29:39, 3.75s/it] 9%|▉ | 46/520 [03:03<29:43, 3.76s/it] {'loss': 1.4809, 'grad_norm': 0.00155420474297427, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:03<29:43, 3.76s/it] 9%|▉ | 47/520 [03:07<29:47, 3.78s/it] {'loss': 1.3698, 'grad_norm': 0.0016670346449175692, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:07<29:47, 3.78s/it] 9%|▉ | 48/520 [03:10<29:48, 3.79s/it] {'loss': 1.3502, 'grad_norm': 0.001827482034550879, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:10<29:48, 3.79s/it] 9%|▉ | 49/520 [03:14<29:44, 3.79s/it] {'loss': 1.3896, 'grad_norm': 0.001553268254558859, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:14<29:44, 3.79s/it] 10%|▉ | 50/520 [03:18<29:41, 3.79s/it] {'loss': 1.3817, 'grad_norm': 0.001568719574955584, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:18<29:41, 
3.79s/it] 10%|▉ | 51/520 [03:22<29:36, 3.79s/it] {'loss': 1.316, 'grad_norm': 0.0018191411156497173, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:22<29:36, 3.79s/it] 10%|█ | 52/520 [03:26<29:33, 3.79s/it] {'loss': 1.4424, 'grad_norm': 0.0019704450163460624, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:26<29:33, 3.79s/it] 10%|█ | 53/520 [03:29<29:23, 3.78s/it] {'loss': 1.4271, 'grad_norm': 0.001696735445771583, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:29<29:23, 3.78s/it] 10%|█ | 54/520 [03:33<29:02, 3.74s/it] {'loss': 1.3473, 'grad_norm': 0.0016278550724258517, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<29:02, 3.74s/it] 11%|█ | 55/520 [03:37<28:42, 3.71s/it] {'loss': 1.3193, 'grad_norm': 0.0018014579338523236, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<28:42, 3.71s/it] 11%|█ | 56/520 [03:40<28:33, 3.69s/it] {'loss': 1.4462, 'grad_norm': 0.0016678333961604166, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:40<28:33, 3.69s/it] 11%|█ | 57/520 [03:44<28:17, 3.67s/it] {'loss': 1.3116, 'grad_norm': 0.0019269670475459408, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:44<28:17, 3.67s/it] 11%|█ | 58/520 [03:48<28:15, 3.67s/it] {'loss': 1.4731, 'grad_norm': 0.0016756797384671958, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<28:15, 3.67s/it] 11%|█▏ | 59/520 [03:51<28:14, 3.68s/it] {'loss': 1.2882, 'grad_norm': 0.0015036933269961326, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:51<28:14, 3.68s/it] 12%|█▏ | 60/520 [03:55<28:08, 3.67s/it] {'loss': 1.3793, 'grad_norm': 0.001657768711688063, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:55<28:08, 3.67s/it] 12%|█▏ | 61/520 [03:59<28:11, 3.69s/it] {'loss': 1.3771, 'grad_norm': 0.0018279507585967764, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:59<28:11, 3.69s/it] 12%|█▏ | 62/520 [04:03<28:29, 3.73s/it] {'loss': 1.355, 'grad_norm': 0.0017069050524526912, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:29, 3.73s/it] 12%|█▏ | 63/520 [04:06<28:40, 3.77s/it] {'loss': 1.3473, 'grad_norm': 0.0015546726613596326, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:06<28:40, 3.77s/it] \ No newline at end of file
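
Note on the stage3.py warning logged around step 12 above: it reports allocator cache flushes under high memory pressure and suggests calling get_accelerator().empty_cache() at the same point on all ranks. A minimal illustrative sketch of that advice only, not part of this log or this repo's code: model_engine, dataloader, and FLUSH_EVERY are hypothetical names, and the flush interval is an arbitrary assumption.

    # Hedged sketch of the DeepSpeed warning's suggestion; names below are
    # assumptions for illustration, not taken from this training run.
    from deepspeed.accelerator import get_accelerator

    FLUSH_EVERY = 50  # arbitrary interval; tune to how often the warning fires

    for step, batch in enumerate(dataloader):
        loss = model_engine(**batch)   # forward pass on the DeepSpeed engine
        model_engine.backward(loss)    # DeepSpeed-managed backward
        model_engine.step()            # ZeRO stage-3 optimizer step
        if step % FLUSH_EVERY == 0:
            # Every rank hits this branch at the same step, so the caches
            # flush together, as the stage3.py warning recommends.
            get_accelerator().empty_cache()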